Diffstat (limited to 'lib')
 lib/Kconfig | 45
 lib/Kconfig.debug | 128
 lib/Kconfig.ubsan | 25
 lib/Makefile | 48
 lib/atomic64_test.c | 2
 lib/cpu_rmap.c | 2
 lib/cpumask.c | 37
 lib/crc32.c | 21
 lib/crc64-rocksoft.c | 126
 lib/crc64.c | 49
 lib/crc7.c | 6
 lib/dump_stack.c | 4
 lib/dynamic_queue_limits.c | 2
 lib/gen_crc64table.c | 10
 lib/iomap.c | 40
 lib/math/Makefile | 5
 lib/math/prime_numbers.c | 91
 lib/math/prime_numbers_private.h | 16
 lib/math/tests/Makefile | 8
 lib/math/tests/gcd_kunit.c | 56
 lib/math/tests/int_log_kunit.c | 74
 lib/math/tests/prime_numbers_kunit.c | 59
 lib/math/tests/rational_kunit.c (renamed from lib/math/rational-test.c) | 0
 lib/net_utils.c | 4
 lib/test_bitmap.c | 28
 lib/test_objpool.c | 3
 lib/test_ubsan.c | 18
 lib/tests/Makefile | 44
 lib/tests/bitfield_kunit.c (renamed from lib/bitfield_kunit.c) | 0
 lib/tests/blackhole_dev_kunit.c (renamed from lib/test_blackhole_dev.c) | 47
 lib/tests/checksum_kunit.c (renamed from lib/checksum_kunit.c) | 0
 lib/tests/cmdline_kunit.c (renamed from lib/cmdline_kunit.c) | 0
 lib/tests/cpumask_kunit.c (renamed from lib/cpumask_kunit.c) | 0
 lib/tests/crc_kunit.c (renamed from lib/crc_kunit.c) | 68
 lib/tests/fortify_kunit.c (renamed from lib/fortify_kunit.c) | 156
 lib/tests/hashtable_test.c (renamed from lib/hashtable_test.c) | 0
 lib/tests/is_signed_type_kunit.c (renamed from lib/is_signed_type_kunit.c) | 0
 lib/tests/kfifo_kunit.c | 224
 lib/tests/kunit_iov_iter.c (renamed from lib/kunit_iov_iter.c) | 0
 lib/tests/list-test.c (renamed from lib/list-test.c) | 0
 lib/tests/memcpy_kunit.c (renamed from lib/memcpy_kunit.c) | 0
 lib/tests/overflow_kunit.c (renamed from lib/overflow_kunit.c) | 38
 lib/tests/printf_kunit.c (renamed from lib/test_printf.c) | 442
 lib/tests/scanf_kunit.c (renamed from lib/test_scanf.c) | 295
 lib/tests/siphash_kunit.c (renamed from lib/siphash_kunit.c) | 0
 lib/tests/slub_kunit.c (renamed from lib/slub_kunit.c) | 59
 lib/tests/stackinit_kunit.c (renamed from lib/stackinit_kunit.c) | 30
 lib/tests/string_helpers_kunit.c (renamed from lib/string_helpers_kunit.c) | 0
 lib/tests/string_kunit.c (renamed from lib/string_kunit.c) | 4
 lib/tests/test_bits.c (renamed from lib/test_bits.c) | 0
 lib/tests/test_fprobe.c (renamed from lib/test_fprobe.c) | 0
 lib/tests/test_hash.c (renamed from lib/test_hash.c) | 0
 lib/tests/test_kprobes.c (renamed from lib/test_kprobes.c) | 0
 lib/tests/test_linear_ranges.c (renamed from lib/test_linear_ranges.c) | 0
 lib/tests/test_list_sort.c (renamed from lib/test_list_sort.c) | 0
 lib/tests/test_sort.c (renamed from lib/test_sort.c) | 0
 lib/tests/usercopy_kunit.c (renamed from lib/usercopy_kunit.c) | 0
 lib/tests/util_macros_kunit.c (renamed from lib/util_macros_kunit.c) | 0
 lib/ubsan.c | 28
 lib/ubsan.h | 8
 lib/vdso/Kconfig | 5
 lib/vdso/Makefile | 19
 lib/vdso/Makefile.include | 18
 lib/vdso/datastore.c | 129
 lib/vdso/getrandom.c | 8
 lib/vdso/gettimeofday.c | 196
 lib/vsprintf.c | 7
 lib/zstd/Makefile | 3
 lib/zstd/common/allocations.h | 56
 lib/zstd/common/bits.h | 150
 lib/zstd/common/bitstream.h | 155
 lib/zstd/common/compiler.h | 151
 lib/zstd/common/cpu.h | 3
 lib/zstd/common/debug.c | 9
 lib/zstd/common/debug.h | 37
 lib/zstd/common/entropy_common.c | 42
 lib/zstd/common/error_private.c | 13
 lib/zstd/common/error_private.h | 88
 lib/zstd/common/fse.h | 103
 lib/zstd/common/fse_decompress.c | 132
 lib/zstd/common/huf.h | 240
 lib/zstd/common/mem.h | 3
 lib/zstd/common/portability_macros.h | 47
 lib/zstd/common/zstd_common.c | 38
 lib/zstd/common/zstd_deps.h | 16
 lib/zstd/common/zstd_internal.h | 153
 lib/zstd/compress/clevels.h | 3
 lib/zstd/compress/fse_compress.c | 74
 lib/zstd/compress/hist.c | 13
 lib/zstd/compress/hist.h | 10
 lib/zstd/compress/huf_compress.c | 441
 lib/zstd/compress/zstd_compress.c | 3293
 lib/zstd/compress/zstd_compress_internal.h | 621
 lib/zstd/compress/zstd_compress_literals.c | 157
 lib/zstd/compress/zstd_compress_literals.h | 25
 lib/zstd/compress/zstd_compress_sequences.c | 21
 lib/zstd/compress/zstd_compress_sequences.h | 16
 lib/zstd/compress/zstd_compress_superblock.c | 394
 lib/zstd/compress/zstd_compress_superblock.h | 3
 lib/zstd/compress/zstd_cwksp.h | 222
 lib/zstd/compress/zstd_double_fast.c | 245
 lib/zstd/compress/zstd_double_fast.h | 27
 lib/zstd/compress/zstd_fast.c | 703
 lib/zstd/compress/zstd_fast.h | 16
 lib/zstd/compress/zstd_lazy.c | 840
 lib/zstd/compress/zstd_lazy.h | 195
 lib/zstd/compress/zstd_ldm.c | 102
 lib/zstd/compress/zstd_ldm.h | 17
 lib/zstd/compress/zstd_ldm_geartab.h | 3
 lib/zstd/compress/zstd_opt.c | 571
 lib/zstd/compress/zstd_opt.h | 55
 lib/zstd/compress/zstd_preSplit.c | 239
 lib/zstd/compress/zstd_preSplit.h | 34
 lib/zstd/decompress/huf_decompress.c | 887
 lib/zstd/decompress/zstd_ddict.c | 9
 lib/zstd/decompress/zstd_ddict.h | 3
 lib/zstd/decompress/zstd_decompress.c | 375
 lib/zstd/decompress/zstd_decompress_block.c | 724
 lib/zstd/decompress/zstd_decompress_block.h | 10
 lib/zstd/decompress/zstd_decompress_internal.h | 19
 lib/zstd/decompress_sources.h | 2
 lib/zstd/zstd_common_module.c | 5
 lib/zstd/zstd_compress_module.c | 75
 lib/zstd/zstd_decompress_module.c | 4
124 files changed, 9421 insertions, 5203 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index dccb61b7d698..61cce0686b53 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -168,15 +168,6 @@ config CRC_T10DIF_ARCH
tristate
default CRC_T10DIF if ARCH_HAS_CRC_T10DIF && CRC_OPTIMIZATIONS
-config CRC64_ROCKSOFT
- tristate "CRC calculation for the Rocksoft model CRC64"
- select CRC64
- select CRYPTO
- select CRYPTO_CRC64_ROCKSOFT
- help
- This option provides a CRC64 API to a registered crypto driver.
- This is used with the block layer's data integrity subsystem.
-
config CRC_ITU_T
tristate "CRC ITU-T V.41 functions"
help
@@ -203,42 +194,30 @@ config CRC32_ARCH
default CRC32 if ARCH_HAS_CRC32 && CRC_OPTIMIZATIONS
config CRC64
- tristate "CRC64 functions"
- help
- This option is provided for the case where no in-kernel-tree
- modules require CRC64 functions, but a module built outside
- the kernel tree does. Such modules that use library CRC64
- functions require M here.
+ tristate
+
+config ARCH_HAS_CRC64
+ bool
+
+config CRC64_ARCH
+ tristate
+ default CRC64 if ARCH_HAS_CRC64 && CRC_OPTIMIZATIONS
config CRC4
- tristate "CRC4 functions"
- help
- This option is provided for the case where no in-kernel-tree
- modules require CRC4 functions, but a module built outside
- the kernel tree does. Such modules that use library CRC4
- functions require M here.
+ tristate
config CRC7
- tristate "CRC7 functions"
- help
- This option is provided for the case where no in-kernel-tree
- modules require CRC7 functions, but a module built outside
- the kernel tree does. Such modules that use library CRC7
- functions require M here.
+ tristate
config LIBCRC32C
- tristate "CRC32c (Castagnoli, et al) Cyclic Redundancy-Check"
+ tristate
select CRC32
help
This option just selects CRC32 and is provided for compatibility
purposes until the users are updated to select CRC32 directly.
config CRC8
- tristate "CRC8 function"
- help
- This option provides CRC8 function. Drivers may select this
- when they need to do cyclic redundancy check according CRC8
- algorithm. Module will be called crc8.
+ tristate
config CRC_OPTIMIZATIONS
bool "Enable optimized CRC implementations" if EXPERT
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 35796c290ca3..8fb1d685f696 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -545,6 +545,17 @@ config FRAME_POINTER
config OBJTOOL
bool
+config OBJTOOL_WERROR
+ bool "Upgrade objtool warnings to errors"
+ depends on OBJTOOL && !COMPILE_TEST
+ help
+ Fail the build on objtool warnings.
+
+ Objtool warnings can indicate kernel instability, including boot
+ failures. This option is highly recommended.
+
+ If unsure, say Y.
+
config STACK_VALIDATION
bool "Compile-time stack metadata validation"
depends on HAVE_STACK_VALIDATION && UNWINDER_FRAME_POINTER
@@ -808,6 +819,15 @@ config ARCH_HAS_DEBUG_VM_PGTABLE
An architecture should select this when it can successfully
build and run DEBUG_VM_PGTABLE.
+config DEBUG_VFS
+ bool "Debug VFS"
+ depends on DEBUG_KERNEL
+ help
+ Enable this to turn on extended checks in the VFS layer that may impact
+ performance.
+
+ If unsure, say N.
+
config DEBUG_VM_IRQSOFF
def_bool DEBUG_VM && !PREEMPT_RT
@@ -1301,15 +1321,6 @@ endmenu # "Debug lockups and hangs"
menu "Scheduler Debugging"
-config SCHED_DEBUG
- bool "Collect scheduler debugging info"
- depends on DEBUG_KERNEL && DEBUG_FS
- default y
- help
- If you say Y here, the /sys/kernel/debug/sched file will be provided
- that can help debug the scheduler. The runtime overhead of this
- option is minimal.
-
config SCHED_INFO
bool
default n
@@ -2427,6 +2438,24 @@ config ASYNC_RAID6_TEST
config TEST_HEXDUMP
tristate "Test functions located in the hexdump module at runtime"
+config PRINTF_KUNIT_TEST
+ tristate "KUnit test printf() family of functions at runtime" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Enable this option to test the printf functions at runtime.
+
+ If unsure, say N.
+
+config SCANF_KUNIT_TEST
+ tristate "KUnit test scanf() family of functions at runtime" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Enable this option to test the scanf functions at runtime.
+
+ If unsure, say N.
+
config STRING_KUNIT_TEST
tristate "KUnit test string functions at runtime" if !KUNIT_ALL_TESTS
depends on KUNIT
@@ -2440,12 +2469,6 @@ config STRING_HELPERS_KUNIT_TEST
config TEST_KSTRTOX
tristate "Test kstrto*() family of functions at runtime"
-config TEST_PRINTF
- tristate "Test printf() family of functions at runtime"
-
-config TEST_SCANF
- tristate "Test scanf() family of functions at runtime"
-
config TEST_BITMAP
tristate "Test bitmap_*() family of functions at runtime"
help
@@ -2557,15 +2580,6 @@ config TEST_BPF
If unsure, say N.
-config TEST_BLACKHOLE_DEV
- tristate "Test blackhole netdev functionality"
- depends on m && NET
- help
- This builds the "test_blackhole_dev" module that validates the
- data path through this blackhole netdev.
-
- If unsure, say N.
-
config FIND_BIT_BENCHMARK
tristate "Test find_bit functions"
help
@@ -2691,6 +2705,20 @@ config SYSCTL_KUNIT_TEST
If unsure, say N.
+config KFIFO_KUNIT_TEST
+ tristate "KUnit Test for the generic kernel FIFO implementation" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This builds the generic FIFO implementation KUnit test suite.
+ It tests the API and basic functionality of the kfifo type
+ and associated macros.
+
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
config LIST_KUNIT_TEST
tristate "KUnit Test for Kernel Linked-list structures" if !KUNIT_ALL_TESTS
depends on KUNIT
@@ -2852,6 +2880,7 @@ config CRC_KUNIT_TEST
tristate "KUnit tests for CRC functions" if !KUNIT_ALL_TESTS
depends on KUNIT
default KUNIT_ALL_TESTS
+ select CRC7
select CRC16
select CRC_T10DIF
select CRC32
@@ -2888,6 +2917,17 @@ config USERCOPY_KUNIT_TEST
on the copy_to/from_user infrastructure, making sure basic
user/kernel boundary testing is working.
+config BLACKHOLE_DEV_KUNIT_TEST
+ tristate "Test blackhole netdev functionality" if !KUNIT_ALL_TESTS
+ depends on NET
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This builds the "blackhole_dev_kunit" module that validates the
+ data path through this blackhole netdev.
+
+ If unsure, say N.
+
config TEST_UDELAY
tristate "udelay test driver"
help
@@ -3166,7 +3206,7 @@ config TEST_OBJPOOL
If unsure, say N.
-config INT_POW_TEST
+config INT_POW_KUNIT_TEST
tristate "Integer exponentiation (int_pow) test" if !KUNIT_ALL_TESTS
depends on KUNIT
default KUNIT_ALL_TESTS
@@ -3197,6 +3237,44 @@ config INT_SQRT_KUNIT_TEST
If unsure, say N
+config INT_LOG_KUNIT_TEST
+ tristate "Integer log (int_log) test" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This option enables the KUnit test suite for the int_log library, which
+ provides two functions to compute the integer logarithm in base 2 and
+ base 10, called intlog2 and intlog10 respectively.
+
+ If unsure, say N
+
+config GCD_KUNIT_TEST
+ tristate "Greatest common divisor test" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This option enables the KUnit test suite for the gcd() function,
+ which computes the greatest common divisor of two numbers.
+
+ This test suite verifies the correctness of gcd() across various
+ scenarios, including edge cases.
+
+ If unsure, say N
+
+config PRIME_NUMBERS_KUNIT_TEST
+ tristate "Prime number generator test" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ select PRIME_NUMBERS
+ default KUNIT_ALL_TESTS
+ help
+ This option enables the KUnit test suite for the {is,next}_prime_number
+ functions.
+
+ Enabling this option will include tests that compare the prime number
+ generator functions against a brute force implementation.
+
+ If unsure, say N
+
endif # RUNTIME_TESTING_MENU
config ARCH_USE_MEMTEST
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index 1d4aa7a83b3a..4216b3a4ff21 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -116,21 +116,22 @@ config UBSAN_UNREACHABLE
This option enables -fsanitize=unreachable which checks for control
flow reaching an expected-to-be-unreachable position.
-config UBSAN_SIGNED_WRAP
- bool "Perform checking for signed arithmetic wrap-around"
+config UBSAN_INTEGER_WRAP
+ bool "Perform checking for integer arithmetic wrap-around"
default UBSAN
depends on !COMPILE_TEST
- # The no_sanitize attribute was introduced in GCC with version 8.
- depends on !CC_IS_GCC || GCC_VERSION >= 80000
+ depends on $(cc-option,-fsanitize-undefined-ignore-overflow-pattern=all)
depends on $(cc-option,-fsanitize=signed-integer-overflow)
- help
- This option enables -fsanitize=signed-integer-overflow which checks
- for wrap-around of any arithmetic operations with signed integers.
- This currently performs nearly no instrumentation due to the
- kernel's use of -fno-strict-overflow which converts all would-be
- arithmetic undefined behavior into wrap-around arithmetic. Future
- sanitizer versions will allow for wrap-around checking (rather than
- exclusively undefined behavior).
+ depends on $(cc-option,-fsanitize=unsigned-integer-overflow)
+ depends on $(cc-option,-fsanitize=implicit-signed-integer-truncation)
+ depends on $(cc-option,-fsanitize=implicit-unsigned-integer-truncation)
+ depends on $(cc-option,-fsanitize-ignorelist=/dev/null)
+ help
+ This option enables all of the sanitizers involved in integer overflow
+ (wrap-around) mitigation: signed-integer-overflow, unsigned-integer-overflow,
+ implicit-signed-integer-truncation, and implicit-unsigned-integer-truncation.
+ This is currently limited only to the size_t type while testing and
+ compiler development continues.
config UBSAN_BOOL
bool "Perform checking for non-boolean values used as boolean"
diff --git a/lib/Makefile b/lib/Makefile
index d5cfc7afbbb8..f07b24ce1b3f 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -5,6 +5,11 @@
ccflags-remove-$(CONFIG_FUNCTION_TRACER) += $(CC_FLAGS_FTRACE)
+# Branch profiling isn't noinstr-safe
+ifdef CONFIG_TRACE_BRANCH_PROFILING
+CFLAGS_smp_processor_id.o += -DDISABLE_BRANCH_PROFILING
+endif
+
# These files are disabled because they produce lots of non-interesting and/or
# flaky coverage that is not a function of syscall inputs. For example,
# rbtree can be global and individual rotations don't correlate with inputs.
@@ -52,9 +57,7 @@ obj-y += bcd.o sort.o parser.o debug_locks.o random32.o \
percpu-refcount.o rhashtable.o base64.o \
once.o refcount.o rcuref.o usercopy.o errseq.o bucket_locks.o \
generic-radix-tree.o bitmap-str.o
-obj-$(CONFIG_STRING_KUNIT_TEST) += string_kunit.o
obj-y += string_helpers.o
-obj-$(CONFIG_STRING_HELPERS_KUNIT_TEST) += string_helpers_kunit.o
obj-y += hexdump.o
obj-$(CONFIG_TEST_HEXDUMP) += test_hexdump.o
obj-y += kstrtox.o
@@ -65,27 +68,20 @@ obj-$(CONFIG_TEST_DHRY) += test_dhry.o
obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
obj-$(CONFIG_TEST_BITOPS) += test_bitops.o
CFLAGS_test_bitops.o += -Werror
-obj-$(CONFIG_CPUMASK_KUNIT_TEST) += cpumask_kunit.o
obj-$(CONFIG_TEST_SYSCTL) += test_sysctl.o
-obj-$(CONFIG_TEST_IOV_ITER) += kunit_iov_iter.o
-obj-$(CONFIG_HASH_KUNIT_TEST) += test_hash.o
obj-$(CONFIG_TEST_IDA) += test_ida.o
obj-$(CONFIG_TEST_UBSAN) += test_ubsan.o
CFLAGS_test_ubsan.o += $(call cc-disable-warning, vla)
CFLAGS_test_ubsan.o += $(call cc-disable-warning, unused-but-set-variable)
UBSAN_SANITIZE_test_ubsan.o := y
obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
-obj-$(CONFIG_TEST_LIST_SORT) += test_list_sort.o
obj-$(CONFIG_TEST_MIN_HEAP) += test_min_heap.o
obj-$(CONFIG_TEST_LKM) += test_module.o
obj-$(CONFIG_TEST_VMALLOC) += test_vmalloc.o
obj-$(CONFIG_TEST_RHASHTABLE) += test_rhashtable.o
-obj-$(CONFIG_TEST_SORT) += test_sort.o
obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_keys.o
obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o
obj-$(CONFIG_TEST_DYNAMIC_DEBUG) += test_dynamic_debug.o
-obj-$(CONFIG_TEST_PRINTF) += test_printf.o
-obj-$(CONFIG_TEST_SCANF) += test_scanf.o
obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o
ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_KASAN),yy)
@@ -98,19 +94,14 @@ obj-$(CONFIG_TEST_XARRAY) += test_xarray.o
obj-$(CONFIG_TEST_MAPLE_TREE) += test_maple_tree.o
obj-$(CONFIG_TEST_PARMAN) += test_parman.o
obj-$(CONFIG_TEST_KMOD) += test_kmod.o
-obj-$(CONFIG_TEST_RUNTIME) += tests/
obj-$(CONFIG_TEST_DEBUG_VIRTUAL) += test_debug_virtual.o
obj-$(CONFIG_TEST_MEMCAT_P) += test_memcat_p.o
obj-$(CONFIG_TEST_OBJAGG) += test_objagg.o
-obj-$(CONFIG_TEST_BLACKHOLE_DEV) += test_blackhole_dev.o
obj-$(CONFIG_TEST_MEMINIT) += test_meminit.o
obj-$(CONFIG_TEST_LOCKUP) += test_lockup.o
obj-$(CONFIG_TEST_HMM) += test_hmm.o
obj-$(CONFIG_TEST_FREE_PAGES) += test_free_pages.o
-obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o
obj-$(CONFIG_TEST_REF_TRACKER) += test_ref_tracker.o
-CFLAGS_test_fprobe.o += $(CC_FLAGS_FTRACE)
-obj-$(CONFIG_FPROBE_SANITY_TEST) += test_fprobe.o
obj-$(CONFIG_TEST_OBJPOOL) += test_objpool.o
obj-$(CONFIG_TEST_FPU) += test_fpu.o
@@ -132,7 +123,7 @@ endif
obj-$(CONFIG_DEBUG_INFO_REDUCED) += debug_info.o
CFLAGS_debug_info.o += $(call cc-option, -femit-struct-debug-detailed=any)
-obj-y += math/ crypto/
+obj-y += math/ crypto/ tests/ vdso/
obj-$(CONFIG_GENERIC_IOMAP) += iomap.o
obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
@@ -167,7 +158,6 @@ obj-$(CONFIG_CRC64) += crc64.o
obj-$(CONFIG_CRC4) += crc4.o
obj-$(CONFIG_CRC7) += crc7.o
obj-$(CONFIG_CRC8) += crc8.o
-obj-$(CONFIG_CRC64_ROCKSOFT) += crc64-rocksoft.o
obj-$(CONFIG_XXHASH) += xxhash.o
obj-$(CONFIG_GENERIC_ALLOCATOR) += genalloc.o
@@ -368,32 +358,6 @@ obj-$(CONFIG_OBJAGG) += objagg.o
# pldmfw library
obj-$(CONFIG_PLDMFW) += pldmfw/
-# KUnit tests
-CFLAGS_bitfield_kunit.o := $(DISABLE_STRUCTLEAK_PLUGIN)
-obj-$(CONFIG_BITFIELD_KUNIT) += bitfield_kunit.o
-obj-$(CONFIG_CHECKSUM_KUNIT) += checksum_kunit.o
-obj-$(CONFIG_UTIL_MACROS_KUNIT) += util_macros_kunit.o
-obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o
-obj-$(CONFIG_HASHTABLE_KUNIT_TEST) += hashtable_test.o
-obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o
-obj-$(CONFIG_BITS_TEST) += test_bits.o
-obj-$(CONFIG_CMDLINE_KUNIT_TEST) += cmdline_kunit.o
-obj-$(CONFIG_SLUB_KUNIT_TEST) += slub_kunit.o
-obj-$(CONFIG_MEMCPY_KUNIT_TEST) += memcpy_kunit.o
-obj-$(CONFIG_IS_SIGNED_TYPE_KUNIT_TEST) += is_signed_type_kunit.o
-CFLAGS_overflow_kunit.o = $(call cc-disable-warning, tautological-constant-out-of-range-compare)
-obj-$(CONFIG_OVERFLOW_KUNIT_TEST) += overflow_kunit.o
-CFLAGS_stackinit_kunit.o += $(call cc-disable-warning, switch-unreachable)
-obj-$(CONFIG_STACKINIT_KUNIT_TEST) += stackinit_kunit.o
-CFLAGS_fortify_kunit.o += $(call cc-disable-warning, unsequenced)
-CFLAGS_fortify_kunit.o += $(call cc-disable-warning, stringop-overread)
-CFLAGS_fortify_kunit.o += $(call cc-disable-warning, stringop-truncation)
-CFLAGS_fortify_kunit.o += $(DISABLE_STRUCTLEAK_PLUGIN)
-obj-$(CONFIG_FORTIFY_KUNIT_TEST) += fortify_kunit.o
-obj-$(CONFIG_CRC_KUNIT_TEST) += crc_kunit.o
-obj-$(CONFIG_SIPHASH_KUNIT_TEST) += siphash_kunit.o
-obj-$(CONFIG_USERCOPY_KUNIT_TEST) += usercopy_kunit.o
-
obj-$(CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED) += devmem_is_allowed.o
obj-$(CONFIG_FIRMWARE_TABLE) += fw_table.o
diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c
index 759ea1783cc5..d726068358c7 100644
--- a/lib/atomic64_test.c
+++ b/lib/atomic64_test.c
@@ -254,7 +254,7 @@ static __init int test_atomics_init(void)
pr_info("passed for %s platform %s CX8 and %s SSE\n",
#ifdef CONFIG_X86_64
"x86-64",
-#elif defined(CONFIG_X86_CMPXCHG64)
+#elif defined(CONFIG_X86_CX8)
"i586+",
#else
"i386+",
diff --git a/lib/cpu_rmap.c b/lib/cpu_rmap.c
index 4c348670da31..f03d9be3f06b 100644
--- a/lib/cpu_rmap.c
+++ b/lib/cpu_rmap.c
@@ -73,7 +73,7 @@ static void cpu_rmap_release(struct kref *ref)
* cpu_rmap_get - internal helper to get new ref on a cpu_rmap
* @rmap: reverse-map allocated with alloc_cpu_rmap()
*/
-static inline void cpu_rmap_get(struct cpu_rmap *rmap)
+void cpu_rmap_get(struct cpu_rmap *rmap)
{
kref_get(&rmap->refcount);
}
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 57274ba8b6d9..5adb9874fbd0 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -7,38 +7,6 @@
#include <linux/memblock.h>
#include <linux/numa.h>
-/**
- * cpumask_next_wrap - helper to implement for_each_cpu_wrap
- * @n: the cpu prior to the place to search
- * @mask: the cpumask pointer
- * @start: the start point of the iteration
- * @wrap: assume @n crossing @start terminates the iteration
- *
- * Return: >= nr_cpu_ids on completion
- *
- * Note: the @wrap argument is required for the start condition when
- * we cannot assume @start is set in @mask.
- */
-unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
-{
- unsigned int next;
-
-again:
- next = cpumask_next(n, mask);
-
- if (wrap && n < start && next >= start) {
- return nr_cpumask_bits;
-
- } else if (next >= nr_cpumask_bits) {
- wrap = true;
- n = -1;
- goto again;
- }
-
- return next;
-}
-EXPORT_SYMBOL(cpumask_next_wrap);
-
/* These are not inline because of header tangles. */
#ifdef CONFIG_CPUMASK_OFFSTACK
/**
@@ -171,8 +139,7 @@ unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
/* NOTE: our first selection will skip 0. */
prev = __this_cpu_read(distribute_cpu_mask_prev);
- next = find_next_and_bit_wrap(cpumask_bits(src1p), cpumask_bits(src2p),
- nr_cpumask_bits, prev + 1);
+ next = cpumask_next_and_wrap(prev, src1p, src2p);
if (next < nr_cpu_ids)
__this_cpu_write(distribute_cpu_mask_prev, next);
@@ -192,7 +159,7 @@ unsigned int cpumask_any_distribute(const struct cpumask *srcp)
/* NOTE: our first selection will skip 0. */
prev = __this_cpu_read(distribute_cpu_mask_prev);
- next = find_next_bit_wrap(cpumask_bits(srcp), nr_cpumask_bits, prev + 1);
+ next = cpumask_next_wrap(prev, srcp);
if (next < nr_cpu_ids)
__this_cpu_write(distribute_cpu_mask_prev, next);
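
The two hunks above replace open-coded find_next_*_bit_wrap() calls with cpumask wrappers. A usage sketch of the same round-robin pattern (my example, not from the patch), assuming the two-argument cpumask_next_wrap(prev, srcp) and cpumask_next_and_wrap() helpers are provided by <linux/cpumask.h> as the hunks imply:

#include <linux/cpumask.h>

/* Round-robin CPU selection: return the next set CPU after @prev,
 * wrapping to the start of @srcp, or fall back to @prev if the wrapped
 * search finds nothing new. */
static unsigned int pick_next_cpu(unsigned int prev, const struct cpumask *srcp)
{
        unsigned int next = cpumask_next_wrap(prev, srcp);

        return next < nr_cpu_ids ? next : prev;
}
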
diff --git a/lib/crc32.c b/lib/crc32.c
index ede6131f66fc..fddd424ff224 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -37,7 +37,7 @@ MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>");
MODULE_DESCRIPTION("Various CRC32 calculations");
MODULE_LICENSE("GPL");
-u32 __pure crc32_le_base(u32 crc, const u8 *p, size_t len)
+u32 crc32_le_base(u32 crc, const u8 *p, size_t len)
{
while (len--)
crc = (crc >> 8) ^ crc32table_le[(crc & 255) ^ *p++];
@@ -45,20 +45,20 @@ u32 __pure crc32_le_base(u32 crc, const u8 *p, size_t len)
}
EXPORT_SYMBOL(crc32_le_base);
-u32 __pure crc32c_le_base(u32 crc, const u8 *p, size_t len)
+u32 crc32c_base(u32 crc, const u8 *p, size_t len)
{
while (len--)
crc = (crc >> 8) ^ crc32ctable_le[(crc & 255) ^ *p++];
return crc;
}
-EXPORT_SYMBOL(crc32c_le_base);
+EXPORT_SYMBOL(crc32c_base);
/*
* This multiplies the polynomials x and y modulo the given modulus.
* This follows the "little-endian" CRC convention that the lsbit
* represents the highest power of x, and the msbit represents x^0.
*/
-static u32 __attribute_const__ gf2_multiply(u32 x, u32 y, u32 modulus)
+static u32 gf2_multiply(u32 x, u32 y, u32 modulus)
{
u32 product = x & 1 ? y : 0;
int i;
@@ -84,8 +84,7 @@ static u32 __attribute_const__ gf2_multiply(u32 x, u32 y, u32 modulus)
* as appending len bytes of zero to the data), in time proportional
* to log(len).
*/
-static u32 __attribute_const__ crc32_generic_shift(u32 crc, size_t len,
- u32 polynomial)
+static u32 crc32_generic_shift(u32 crc, size_t len, u32 polynomial)
{
u32 power = polynomial; /* CRC of x^32 */
int i;
@@ -114,19 +113,19 @@ static u32 __attribute_const__ crc32_generic_shift(u32 crc, size_t len,
return crc;
}
-u32 __attribute_const__ crc32_le_shift(u32 crc, size_t len)
+u32 crc32_le_shift(u32 crc, size_t len)
{
return crc32_generic_shift(crc, len, CRC32_POLY_LE);
}
+EXPORT_SYMBOL(crc32_le_shift);
-u32 __attribute_const__ __crc32c_le_shift(u32 crc, size_t len)
+u32 crc32c_shift(u32 crc, size_t len)
{
return crc32_generic_shift(crc, len, CRC32C_POLY_LE);
}
-EXPORT_SYMBOL(crc32_le_shift);
-EXPORT_SYMBOL(__crc32c_le_shift);
+EXPORT_SYMBOL(crc32c_shift);
-u32 __pure crc32_be_base(u32 crc, const u8 *p, size_t len)
+u32 crc32_be_base(u32 crc, const u8 *p, size_t len)
{
while (len--)
crc = (crc << 8) ^ crc32table_be[(crc >> 24) ^ *p++];
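
A usage sketch for the renamed CRC32C entry points (mine, not part of the patch), assuming crc32c() remains the inline wrapper in <linux/crc32.h> that dispatches to crc32c_base() or an arch implementation, and relying on the shift/xor combine identity that crc32c_shift() exists to support:

#include <linux/crc32.h>

/* crc32c(A || B) can be derived from the CRCs of A and B alone: shift
 * crc(A) forward by len(B) zero bytes, then xor in crc(B), where crc(B)
 * was computed with a zero seed. */
static u32 crc32c_of_pair(const u8 *a, size_t alen, const u8 *b, size_t blen)
{
        u32 crc_a = crc32c(0, a, alen);
        u32 crc_b = crc32c(0, b, blen);

        return crc32c_shift(crc_a, blen) ^ crc_b;
}
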
diff --git a/lib/crc64-rocksoft.c b/lib/crc64-rocksoft.c
deleted file mode 100644
index fc9ae0da5df7..000000000000
--- a/lib/crc64-rocksoft.c
+++ /dev/null
@@ -1,126 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/crc64.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <crypto/hash.h>
-#include <crypto/algapi.h>
-#include <linux/static_key.h>
-#include <linux/notifier.h>
-
-static struct crypto_shash __rcu *crc64_rocksoft_tfm;
-static DEFINE_STATIC_KEY_TRUE(crc64_rocksoft_fallback);
-static DEFINE_MUTEX(crc64_rocksoft_mutex);
-static struct work_struct crc64_rocksoft_rehash_work;
-
-static int crc64_rocksoft_notify(struct notifier_block *self, unsigned long val, void *data)
-{
- struct crypto_alg *alg = data;
-
- if (val != CRYPTO_MSG_ALG_LOADED ||
- strcmp(alg->cra_name, CRC64_ROCKSOFT_STRING))
- return NOTIFY_DONE;
-
- schedule_work(&crc64_rocksoft_rehash_work);
- return NOTIFY_OK;
-}
-
-static void crc64_rocksoft_rehash(struct work_struct *work)
-{
- struct crypto_shash *new, *old;
-
- mutex_lock(&crc64_rocksoft_mutex);
- old = rcu_dereference_protected(crc64_rocksoft_tfm,
- lockdep_is_held(&crc64_rocksoft_mutex));
- new = crypto_alloc_shash(CRC64_ROCKSOFT_STRING, 0, 0);
- if (IS_ERR(new)) {
- mutex_unlock(&crc64_rocksoft_mutex);
- return;
- }
- rcu_assign_pointer(crc64_rocksoft_tfm, new);
- mutex_unlock(&crc64_rocksoft_mutex);
-
- if (old) {
- synchronize_rcu();
- crypto_free_shash(old);
- } else {
- static_branch_disable(&crc64_rocksoft_fallback);
- }
-}
-
-static struct notifier_block crc64_rocksoft_nb = {
- .notifier_call = crc64_rocksoft_notify,
-};
-
-u64 crc64_rocksoft_update(u64 crc, const unsigned char *buffer, size_t len)
-{
- struct {
- struct shash_desc shash;
- u64 crc;
- } desc;
- int err;
-
- if (static_branch_unlikely(&crc64_rocksoft_fallback))
- return crc64_rocksoft_generic(crc, buffer, len);
-
- rcu_read_lock();
- desc.shash.tfm = rcu_dereference(crc64_rocksoft_tfm);
- desc.crc = crc;
- err = crypto_shash_update(&desc.shash, buffer, len);
- rcu_read_unlock();
-
- BUG_ON(err);
-
- return desc.crc;
-}
-EXPORT_SYMBOL_GPL(crc64_rocksoft_update);
-
-u64 crc64_rocksoft(const unsigned char *buffer, size_t len)
-{
- return crc64_rocksoft_update(0, buffer, len);
-}
-EXPORT_SYMBOL_GPL(crc64_rocksoft);
-
-static int __init crc64_rocksoft_mod_init(void)
-{
- INIT_WORK(&crc64_rocksoft_rehash_work, crc64_rocksoft_rehash);
- crypto_register_notifier(&crc64_rocksoft_nb);
- crc64_rocksoft_rehash(&crc64_rocksoft_rehash_work);
- return 0;
-}
-
-static void __exit crc64_rocksoft_mod_fini(void)
-{
- crypto_unregister_notifier(&crc64_rocksoft_nb);
- cancel_work_sync(&crc64_rocksoft_rehash_work);
- crypto_free_shash(rcu_dereference_protected(crc64_rocksoft_tfm, 1));
-}
-
-module_init(crc64_rocksoft_mod_init);
-module_exit(crc64_rocksoft_mod_fini);
-
-static int crc64_rocksoft_transform_show(char *buffer, const struct kernel_param *kp)
-{
- struct crypto_shash *tfm;
- int len;
-
- if (static_branch_unlikely(&crc64_rocksoft_fallback))
- return sprintf(buffer, "fallback\n");
-
- rcu_read_lock();
- tfm = rcu_dereference(crc64_rocksoft_tfm);
- len = snprintf(buffer, PAGE_SIZE, "%s\n",
- crypto_shash_driver_name(tfm));
- rcu_read_unlock();
-
- return len;
-}
-
-module_param_call(transform, NULL, crc64_rocksoft_transform_show, NULL, 0444);
-
-MODULE_AUTHOR("Keith Busch <kbusch@kernel.org>");
-MODULE_DESCRIPTION("Rocksoft model CRC64 calculation (library API)");
-MODULE_LICENSE("GPL");
-MODULE_SOFTDEP("pre: crc64");
diff --git a/lib/crc64.c b/lib/crc64.c
index 61ae8dfb6a1c..5b1b17057f0a 100644
--- a/lib/crc64.c
+++ b/lib/crc64.c
@@ -22,8 +22,8 @@
* x^24 + x^23 + x^22 + x^21 + x^19 + x^17 + x^13 + x^12 + x^10 + x^9 +
* x^7 + x^4 + x + 1
*
- * crc64rocksoft[256] table is from the Rocksoft specification polynomial
- * defined as,
+ * crc64nvmetable[256] uses the CRC64 polynomial from the NVME NVM Command Set
+ * Specification and uses least-significant-bit first bit order:
*
* x^64 + x^63 + x^61 + x^59 + x^58 + x^56 + x^55 + x^52 + x^49 + x^48 + x^47 +
* x^46 + x^44 + x^41 + x^37 + x^36 + x^34 + x^32 + x^31 + x^28 + x^26 + x^23 +
@@ -41,45 +41,18 @@
MODULE_DESCRIPTION("CRC64 calculations");
MODULE_LICENSE("GPL v2");
-/**
- * crc64_be - Calculate bitwise big-endian ECMA-182 CRC64
- * @crc: seed value for computation. 0 or (u64)~0 for a new CRC calculation,
- * or the previous crc64 value if computing incrementally.
- * @p: pointer to buffer over which CRC64 is run
- * @len: length of buffer @p
- */
-u64 __pure crc64_be(u64 crc, const void *p, size_t len)
+u64 crc64_be_generic(u64 crc, const u8 *p, size_t len)
{
- size_t i, t;
-
- const unsigned char *_p = p;
-
- for (i = 0; i < len; i++) {
- t = ((crc >> 56) ^ (*_p++)) & 0xFF;
- crc = crc64table[t] ^ (crc << 8);
- }
-
+ while (len--)
+ crc = (crc << 8) ^ crc64table[(crc >> 56) ^ *p++];
return crc;
}
-EXPORT_SYMBOL_GPL(crc64_be);
+EXPORT_SYMBOL_GPL(crc64_be_generic);
-/**
- * crc64_rocksoft_generic - Calculate bitwise Rocksoft CRC64
- * @crc: seed value for computation. 0 for a new CRC calculation, or the
- * previous crc64 value if computing incrementally.
- * @p: pointer to buffer over which CRC64 is run
- * @len: length of buffer @p
- */
-u64 __pure crc64_rocksoft_generic(u64 crc, const void *p, size_t len)
+u64 crc64_nvme_generic(u64 crc, const u8 *p, size_t len)
{
- const unsigned char *_p = p;
- size_t i;
-
- crc = ~crc;
-
- for (i = 0; i < len; i++)
- crc = (crc >> 8) ^ crc64rocksofttable[(crc & 0xff) ^ *_p++];
-
- return ~crc;
+ while (len--)
+ crc = (crc >> 8) ^ crc64nvmetable[(crc & 0xff) ^ *p++];
+ return crc;
}
-EXPORT_SYMBOL_GPL(crc64_rocksoft_generic);
+EXPORT_SYMBOL_GPL(crc64_nvme_generic);
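
Note that the removed crc64_rocksoft_generic() inverted the CRC on entry and exit, while the new crc64_nvme_generic() deliberately does not. A hedged sketch of the caller-side wrapper this implies (the in-tree equivalent presumably lives in <linux/crc64.h>):

#include <linux/crc64.h>

/* Rocksoft/NVMe CRC64 convention: invert before and after the raw table
 * walk, matching what crc64_rocksoft_generic() used to do internally
 * (illustrative wrapper, not taken from this patch). */
static inline u64 crc64_nvme_sketch(u64 crc, const void *p, size_t len)
{
        return ~crc64_nvme_generic(~crc, p, len);
}
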
diff --git a/lib/crc7.c b/lib/crc7.c
index 3848e313b722..8dd991cc6114 100644
--- a/lib/crc7.c
+++ b/lib/crc7.c
@@ -7,14 +7,13 @@
#include <linux/module.h>
#include <linux/crc7.h>
-
/*
* Table for CRC-7 (polynomial x^7 + x^3 + 1).
* This is a big-endian CRC (msbit is highest power of x),
* aligned so the msbit of the byte is the x^6 coefficient
* and the lsbit is not used.
*/
-const u8 crc7_be_syndrome_table[256] = {
+static const u8 crc7_be_syndrome_table[256] = {
0x00, 0x12, 0x24, 0x36, 0x48, 0x5a, 0x6c, 0x7e,
0x90, 0x82, 0xb4, 0xa6, 0xd8, 0xca, 0xfc, 0xee,
0x32, 0x20, 0x16, 0x04, 0x7a, 0x68, 0x5e, 0x4c,
@@ -48,7 +47,6 @@ const u8 crc7_be_syndrome_table[256] = {
0x1c, 0x0e, 0x38, 0x2a, 0x54, 0x46, 0x70, 0x62,
0x8c, 0x9e, 0xa8, 0xba, 0xc4, 0xd6, 0xe0, 0xf2
};
-EXPORT_SYMBOL(crc7_be_syndrome_table);
/**
* crc7_be - update the CRC7 for the data buffer
@@ -65,7 +63,7 @@ EXPORT_SYMBOL(crc7_be_syndrome_table);
u8 crc7_be(u8 crc, const u8 *buffer, size_t len)
{
while (len--)
- crc = crc7_be_byte(crc, *buffer++);
+ crc = crc7_be_syndrome_table[crc ^ *buffer++];
return crc;
}
EXPORT_SYMBOL(crc7_be);
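
With the syndrome table now private, callers go through crc7_be() alone. A typical-caller sketch (an assumption, not from this patch), modeled on the SD/MMC command CRC:

#include <linux/crc7.h>

/* CRC-7 over a 5-byte SD/MMC command frame: the 7-bit CRC occupies the
 * upper bits of the final byte and the low "end bit" is set to 1. */
static u8 mmc_cmd_crc7(const u8 cmd[5])
{
        return crc7_be(0, cmd, 5) | 0x01;
}
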
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
index 388da1aea14a..b3a85fe8b673 100644
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -54,7 +54,7 @@ void __init dump_stack_set_arch_desc(const char *fmt, ...)
*/
void dump_stack_print_info(const char *log_lvl)
{
- printk("%sCPU: %d UID: %u PID: %d Comm: %.20s %s%s %s %.*s" BUILD_ID_FMT "\n",
+ printk("%sCPU: %d UID: %u PID: %d Comm: %.20s %s%s %s %.*s %s " BUILD_ID_FMT "\n",
log_lvl, raw_smp_processor_id(),
__kuid_val(current_real_cred()->euid),
current->pid, current->comm,
@@ -62,7 +62,7 @@ void dump_stack_print_info(const char *log_lvl)
print_tainted(),
init_utsname()->release,
(int)strcspn(init_utsname()->version, " "),
- init_utsname()->version, BUILD_ID_VAL);
+ init_utsname()->version, preempt_model_str(), BUILD_ID_VAL);
if (get_taint())
printk("%s%s\n", log_lvl, print_tainted_verbose());
diff --git a/lib/dynamic_queue_limits.c b/lib/dynamic_queue_limits.c
index c1b7638a594a..f97a752e900a 100644
--- a/lib/dynamic_queue_limits.c
+++ b/lib/dynamic_queue_limits.c
@@ -190,7 +190,7 @@ EXPORT_SYMBOL(dql_completed);
void dql_reset(struct dql *dql)
{
/* Reset all dynamic values */
- dql->limit = 0;
+ dql->limit = dql->min_limit;
dql->num_queued = 0;
dql->num_completed = 0;
dql->last_obj_cnt = 0;
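
The dql_reset() change above means a reset now restarts from the configured floor instead of zero. A minimal sketch (mine; in practice min_limit is usually set through the BQL sysfs attributes, and the value here is hypothetical):

#include <linux/dynamic_queue_limits.h>
#include <linux/jiffies.h>

/* After this change, dql_reset() restores dql->limit to min_limit, so a
 * queue restart does not have to ramp up from a zero limit. */
static void example_dql_setup(struct dql *dql)
{
        dql_init(dql, HZ);      /* one-second hold time, as BQL uses */
        dql->min_limit = 256;   /* hypothetical floor */
        dql_reset(dql);         /* limit now starts at 256, not 0 */
}
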
diff --git a/lib/gen_crc64table.c b/lib/gen_crc64table.c
index 55e222acd0b8..e05a4230a0a0 100644
--- a/lib/gen_crc64table.c
+++ b/lib/gen_crc64table.c
@@ -17,10 +17,10 @@
#include <stdio.h>
#define CRC64_ECMA182_POLY 0x42F0E1EBA9EA3693ULL
-#define CRC64_ROCKSOFT_POLY 0x9A6C9329AC4BC9B5ULL
+#define CRC64_NVME_POLY 0x9A6C9329AC4BC9B5ULL
static uint64_t crc64_table[256] = {0};
-static uint64_t crc64_rocksoft_table[256] = {0};
+static uint64_t crc64_nvme_table[256] = {0};
static void generate_reflected_crc64_table(uint64_t table[256], uint64_t poly)
{
@@ -82,14 +82,14 @@ static void print_crc64_tables(void)
printf("static const u64 ____cacheline_aligned crc64table[256] = {\n");
output_table(crc64_table);
- printf("\nstatic const u64 ____cacheline_aligned crc64rocksofttable[256] = {\n");
- output_table(crc64_rocksoft_table);
+ printf("\nstatic const u64 ____cacheline_aligned crc64nvmetable[256] = {\n");
+ output_table(crc64_nvme_table);
}
int main(int argc, char *argv[])
{
generate_crc64_table(crc64_table, CRC64_ECMA182_POLY);
- generate_reflected_crc64_table(crc64_rocksoft_table, CRC64_ROCKSOFT_POLY);
+ generate_reflected_crc64_table(crc64_nvme_table, CRC64_NVME_POLY);
print_crc64_tables();
return 0;
}
diff --git a/lib/iomap.c b/lib/iomap.c
index 4f8b31baa575..a65717cd86f7 100644
--- a/lib/iomap.c
+++ b/lib/iomap.c
@@ -111,7 +111,7 @@ EXPORT_SYMBOL(ioread16be);
EXPORT_SYMBOL(ioread32);
EXPORT_SYMBOL(ioread32be);
-#ifdef readq
+#ifdef CONFIG_64BIT
static u64 pio_read64_lo_hi(unsigned long port)
{
u64 lo, hi;
@@ -153,21 +153,21 @@ static u64 pio_read64be_hi_lo(unsigned long port)
}
__no_kmsan_checks
-u64 ioread64_lo_hi(const void __iomem *addr)
+u64 __ioread64_lo_hi(const void __iomem *addr)
{
IO_COND(addr, return pio_read64_lo_hi(port), return readq(addr));
return 0xffffffffffffffffULL;
}
__no_kmsan_checks
-u64 ioread64_hi_lo(const void __iomem *addr)
+u64 __ioread64_hi_lo(const void __iomem *addr)
{
IO_COND(addr, return pio_read64_hi_lo(port), return readq(addr));
return 0xffffffffffffffffULL;
}
__no_kmsan_checks
-u64 ioread64be_lo_hi(const void __iomem *addr)
+u64 __ioread64be_lo_hi(const void __iomem *addr)
{
IO_COND(addr, return pio_read64be_lo_hi(port),
return mmio_read64be(addr));
@@ -175,19 +175,19 @@ u64 ioread64be_lo_hi(const void __iomem *addr)
}
__no_kmsan_checks
-u64 ioread64be_hi_lo(const void __iomem *addr)
+u64 __ioread64be_hi_lo(const void __iomem *addr)
{
IO_COND(addr, return pio_read64be_hi_lo(port),
return mmio_read64be(addr));
return 0xffffffffffffffffULL;
}
-EXPORT_SYMBOL(ioread64_lo_hi);
-EXPORT_SYMBOL(ioread64_hi_lo);
-EXPORT_SYMBOL(ioread64be_lo_hi);
-EXPORT_SYMBOL(ioread64be_hi_lo);
+EXPORT_SYMBOL(__ioread64_lo_hi);
+EXPORT_SYMBOL(__ioread64_hi_lo);
+EXPORT_SYMBOL(__ioread64be_lo_hi);
+EXPORT_SYMBOL(__ioread64be_hi_lo);
-#endif /* readq */
+#endif /* CONFIG_64BIT */
#ifndef pio_write16be
#define pio_write16be(val,port) outw(swab16(val),port)
@@ -236,7 +236,7 @@ EXPORT_SYMBOL(iowrite16be);
EXPORT_SYMBOL(iowrite32);
EXPORT_SYMBOL(iowrite32be);
-#ifdef writeq
+#ifdef CONFIG_64BIT
static void pio_write64_lo_hi(u64 val, unsigned long port)
{
outl(val, port);
@@ -261,7 +261,7 @@ static void pio_write64be_hi_lo(u64 val, unsigned long port)
pio_write32be(val, port + sizeof(u32));
}
-void iowrite64_lo_hi(u64 val, void __iomem *addr)
+void __iowrite64_lo_hi(u64 val, void __iomem *addr)
{
/* Make sure uninitialized memory isn't copied to devices. */
kmsan_check_memory(&val, sizeof(val));
@@ -269,7 +269,7 @@ void iowrite64_lo_hi(u64 val, void __iomem *addr)
writeq(val, addr));
}
-void iowrite64_hi_lo(u64 val, void __iomem *addr)
+void __iowrite64_hi_lo(u64 val, void __iomem *addr)
{
/* Make sure uninitialized memory isn't copied to devices. */
kmsan_check_memory(&val, sizeof(val));
@@ -277,7 +277,7 @@ void iowrite64_hi_lo(u64 val, void __iomem *addr)
writeq(val, addr));
}
-void iowrite64be_lo_hi(u64 val, void __iomem *addr)
+void __iowrite64be_lo_hi(u64 val, void __iomem *addr)
{
/* Make sure uninitialized memory isn't copied to devices. */
kmsan_check_memory(&val, sizeof(val));
@@ -285,7 +285,7 @@ void iowrite64be_lo_hi(u64 val, void __iomem *addr)
mmio_write64be(val, addr));
}
-void iowrite64be_hi_lo(u64 val, void __iomem *addr)
+void __iowrite64be_hi_lo(u64 val, void __iomem *addr)
{
/* Make sure uninitialized memory isn't copied to devices. */
kmsan_check_memory(&val, sizeof(val));
@@ -293,12 +293,12 @@ void iowrite64be_hi_lo(u64 val, void __iomem *addr)
mmio_write64be(val, addr));
}
-EXPORT_SYMBOL(iowrite64_lo_hi);
-EXPORT_SYMBOL(iowrite64_hi_lo);
-EXPORT_SYMBOL(iowrite64be_lo_hi);
-EXPORT_SYMBOL(iowrite64be_hi_lo);
+EXPORT_SYMBOL(__iowrite64_lo_hi);
+EXPORT_SYMBOL(__iowrite64_hi_lo);
+EXPORT_SYMBOL(__iowrite64be_lo_hi);
+EXPORT_SYMBOL(__iowrite64be_hi_lo);
-#endif /* readq */
+#endif /* CONFIG_64BIT */
/*
* These are the "repeat MMIO read/write" functions.
diff --git a/lib/math/Makefile b/lib/math/Makefile
index 853f023ae537..d1caba23baa0 100644
--- a/lib/math/Makefile
+++ b/lib/math/Makefile
@@ -5,8 +5,7 @@ obj-$(CONFIG_CORDIC) += cordic.o
obj-$(CONFIG_PRIME_NUMBERS) += prime_numbers.o
obj-$(CONFIG_RATIONAL) += rational.o
-obj-$(CONFIG_INT_POW_TEST) += tests/int_pow_kunit.o
obj-$(CONFIG_TEST_DIV64) += test_div64.o
obj-$(CONFIG_TEST_MULDIV64) += test_mul_u64_u64_div_u64.o
-obj-$(CONFIG_RATIONAL_KUNIT_TEST) += rational-test.o
-obj-$(CONFIG_INT_SQRT_KUNIT_TEST) += tests/int_sqrt_kunit.o
\ No newline at end of file
+
+obj-y += tests/
diff --git a/lib/math/prime_numbers.c b/lib/math/prime_numbers.c
index 9a17ee9af93a..95a6f7960db9 100644
--- a/lib/math/prime_numbers.c
+++ b/lib/math/prime_numbers.c
@@ -1,16 +1,11 @@
// SPDX-License-Identifier: GPL-2.0-only
-#define pr_fmt(fmt) "prime numbers: " fmt
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/prime_numbers.h>
#include <linux/slab.h>
-struct primes {
- struct rcu_head rcu;
- unsigned long last, sz;
- unsigned long primes[];
-};
+#include "prime_numbers_private.h"
#if BITS_PER_LONG == 64
static const struct primes small_primes = {
@@ -62,9 +57,25 @@ static const struct primes small_primes = {
static DEFINE_MUTEX(lock);
static const struct primes __rcu *primes = RCU_INITIALIZER(&small_primes);
-static unsigned long selftest_max;
+#if IS_ENABLED(CONFIG_PRIME_NUMBERS_KUNIT_TEST)
+/*
+ * Calls the callback under RCU lock. The callback must not retain
+ * the primes pointer.
+ */
+void with_primes(void *ctx, primes_fn fn)
+{
+ rcu_read_lock();
+ fn(ctx, rcu_dereference(primes));
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL(with_primes);
+
+EXPORT_SYMBOL(slow_is_prime_number);
-static bool slow_is_prime_number(unsigned long x)
+#else
+static
+#endif
+bool slow_is_prime_number(unsigned long x)
{
unsigned long y = int_sqrt(x);
@@ -239,77 +250,13 @@ bool is_prime_number(unsigned long x)
}
EXPORT_SYMBOL(is_prime_number);
-static void dump_primes(void)
-{
- const struct primes *p;
- char *buf;
-
- buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
-
- rcu_read_lock();
- p = rcu_dereference(primes);
-
- if (buf)
- bitmap_print_to_pagebuf(true, buf, p->primes, p->sz);
- pr_info("primes.{last=%lu, .sz=%lu, .primes[]=...x%lx} = %s\n",
- p->last, p->sz, p->primes[BITS_TO_LONGS(p->sz) - 1], buf);
-
- rcu_read_unlock();
-
- kfree(buf);
-}
-
-static int selftest(unsigned long max)
-{
- unsigned long x, last;
-
- if (!max)
- return 0;
-
- for (last = 0, x = 2; x < max; x++) {
- bool slow = slow_is_prime_number(x);
- bool fast = is_prime_number(x);
-
- if (slow != fast) {
- pr_err("inconsistent result for is-prime(%lu): slow=%s, fast=%s!\n",
- x, slow ? "yes" : "no", fast ? "yes" : "no");
- goto err;
- }
-
- if (!slow)
- continue;
-
- if (next_prime_number(last) != x) {
- pr_err("incorrect result for next-prime(%lu): expected %lu, got %lu\n",
- last, x, next_prime_number(last));
- goto err;
- }
- last = x;
- }
-
- pr_info("%s(%lu) passed, last prime was %lu\n", __func__, x, last);
- return 0;
-
-err:
- dump_primes();
- return -EINVAL;
-}
-
-static int __init primes_init(void)
-{
- return selftest(selftest_max);
-}
-
static void __exit primes_exit(void)
{
free_primes();
}
-module_init(primes_init);
module_exit(primes_exit);
-module_param_named(selftest, selftest_max, ulong, 0400);
-
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Prime number library");
MODULE_LICENSE("GPL");
diff --git a/lib/math/prime_numbers_private.h b/lib/math/prime_numbers_private.h
new file mode 100644
index 000000000000..f3ebf5386e6b
--- /dev/null
+++ b/lib/math/prime_numbers_private.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/types.h>
+
+struct primes {
+ struct rcu_head rcu;
+ unsigned long last, sz;
+ unsigned long primes[];
+};
+
+#if IS_ENABLED(CONFIG_PRIME_NUMBERS_KUNIT_TEST)
+typedef void (*primes_fn)(void *, const struct primes *);
+
+void with_primes(void *ctx, primes_fn fn);
+bool slow_is_prime_number(unsigned long x);
+#endif
diff --git a/lib/math/tests/Makefile b/lib/math/tests/Makefile
index e1a79f093b2d..13dc96e48408 100644
--- a/lib/math/tests/Makefile
+++ b/lib/math/tests/Makefile
@@ -1,4 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_INT_POW_TEST) += int_pow_kunit.o
-obj-$(CONFIG_INT_SQRT_KUNIT_TEST) += int_sqrt_kunit.o
+obj-$(CONFIG_GCD_KUNIT_TEST) += gcd_kunit.o
+obj-$(CONFIG_INT_LOG_KUNIT_TEST) += int_log_kunit.o
+obj-$(CONFIG_INT_POW_KUNIT_TEST) += int_pow_kunit.o
+obj-$(CONFIG_INT_SQRT_KUNIT_TEST) += int_sqrt_kunit.o
+obj-$(CONFIG_PRIME_NUMBERS_KUNIT_TEST) += prime_numbers_kunit.o
+obj-$(CONFIG_RATIONAL_KUNIT_TEST) += rational_kunit.o
diff --git a/lib/math/tests/gcd_kunit.c b/lib/math/tests/gcd_kunit.c
new file mode 100644
index 000000000000..ede1883583b1
--- /dev/null
+++ b/lib/math/tests/gcd_kunit.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <kunit/test.h>
+#include <linux/gcd.h>
+#include <linux/limits.h>
+
+struct test_case_params {
+ unsigned long val1;
+ unsigned long val2;
+ unsigned long expected_result;
+ const char *name;
+};
+
+static const struct test_case_params params[] = {
+ { 48, 18, 6, "GCD of 48 and 18" },
+ { 18, 48, 6, "GCD of 18 and 48" },
+ { 56, 98, 14, "GCD of 56 and 98" },
+ { 17, 13, 1, "Coprime numbers" },
+ { 101, 103, 1, "Coprime numbers" },
+ { 270, 192, 6, "GCD of 270 and 192" },
+ { 0, 5, 5, "GCD with zero" },
+ { 7, 0, 7, "GCD with zero reversed" },
+ { 36, 36, 36, "GCD of identical numbers" },
+ { ULONG_MAX, 1, 1, "GCD of max ulong and 1" },
+ { ULONG_MAX, ULONG_MAX, ULONG_MAX, "GCD of max ulong values" },
+};
+
+static void get_desc(const struct test_case_params *tc, char *desc)
+{
+ strscpy(desc, tc->name, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(gcd, params, get_desc);
+
+static void gcd_test(struct kunit *test)
+{
+ const struct test_case_params *tc = (const struct test_case_params *)test->param_value;
+
+ KUNIT_EXPECT_EQ(test, tc->expected_result, gcd(tc->val1, tc->val2));
+}
+
+static struct kunit_case math_gcd_test_cases[] = {
+ KUNIT_CASE_PARAM(gcd_test, gcd_gen_params),
+ {}
+};
+
+static struct kunit_suite gcd_test_suite = {
+ .name = "math-gcd",
+ .test_cases = math_gcd_test_cases,
+};
+
+kunit_test_suite(gcd_test_suite);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("math.gcd KUnit test suite");
+MODULE_AUTHOR("Yu-Chun Lin <eleanor15x@gmail.com>");
diff --git a/lib/math/tests/int_log_kunit.c b/lib/math/tests/int_log_kunit.c
new file mode 100644
index 000000000000..14e854146cb4
--- /dev/null
+++ b/lib/math/tests/int_log_kunit.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <kunit/test.h>
+#include <linux/int_log.h>
+
+struct test_case_params {
+ u32 value;
+ unsigned int expected_result;
+ const char *name;
+};
+
+
+/* The expected result takes into account the log error */
+static const struct test_case_params intlog2_params[] = {
+ {0, 0, "Log base 2 of 0"},
+ {1, 0, "Log base 2 of 1"},
+ {2, 16777216, "Log base 2 of 2"},
+ {3, 26591232, "Log base 2 of 3"},
+ {4, 33554432, "Log base 2 of 4"},
+ {8, 50331648, "Log base 2 of 8"},
+ {16, 67108864, "Log base 2 of 16"},
+ {32, 83886080, "Log base 2 of 32"},
+ {U32_MAX, 536870911, "Log base 2 of MAX"},
+};
+
+static const struct test_case_params intlog10_params[] = {
+ {0, 0, "Log base 10 of 0"},
+ {1, 0, "Log base 10 of 1"},
+ {6, 13055203, "Log base 10 of 6"},
+ {10, 16777225, "Log base 10 of 10"},
+ {100, 33554450, "Log base 10 of 100"},
+ {1000, 50331675, "Log base 10 of 1000"},
+ {10000, 67108862, "Log base 10 of 10000"},
+ {U32_MAX, 161614247, "Log base 10 of MAX"}
+};
+
+static void get_desc(const struct test_case_params *tc, char *desc)
+{
+ strscpy(desc, tc->name, KUNIT_PARAM_DESC_SIZE);
+}
+
+
+KUNIT_ARRAY_PARAM(intlog2, intlog2_params, get_desc);
+
+static void intlog2_test(struct kunit *test)
+{
+ const struct test_case_params *tc = (const struct test_case_params *)test->param_value;
+
+ KUNIT_EXPECT_EQ(test, tc->expected_result, intlog2(tc->value));
+}
+
+KUNIT_ARRAY_PARAM(intlog10, intlog10_params, get_desc);
+
+static void intlog10_test(struct kunit *test)
+{
+ const struct test_case_params *tc = (const struct test_case_params *)test->param_value;
+
+ KUNIT_EXPECT_EQ(test, tc->expected_result, intlog10(tc->value));
+}
+
+static struct kunit_case math_int_log_test_cases[] = {
+ KUNIT_CASE_PARAM(intlog2_test, intlog2_gen_params),
+ KUNIT_CASE_PARAM(intlog10_test, intlog10_gen_params),
+ {}
+};
+
+static struct kunit_suite int_log_test_suite = {
+ .name = "math-int_log",
+ .test_cases = math_int_log_test_cases,
+};
+
+kunit_test_suites(&int_log_test_suite);
+
+MODULE_DESCRIPTION("math.int_log KUnit test suite");
+MODULE_LICENSE("GPL");
diff --git a/lib/math/tests/prime_numbers_kunit.c b/lib/math/tests/prime_numbers_kunit.c
new file mode 100644
index 000000000000..2f1643208c66
--- /dev/null
+++ b/lib/math/tests/prime_numbers_kunit.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <kunit/test.h>
+#include <linux/module.h>
+#include <linux/prime_numbers.h>
+
+#include "../prime_numbers_private.h"
+
+static void dump_primes(void *ctx, const struct primes *p)
+{
+ static char buf[PAGE_SIZE];
+ struct kunit_suite *suite = ctx;
+
+ bitmap_print_to_pagebuf(true, buf, p->primes, p->sz);
+ kunit_info(suite, "primes.{last=%lu, .sz=%lu, .primes[]=...x%lx} = %s",
+ p->last, p->sz, p->primes[BITS_TO_LONGS(p->sz) - 1], buf);
+}
+
+static void prime_numbers_test(struct kunit *test)
+{
+ const unsigned long max = 65536;
+ unsigned long x, last, next;
+
+ for (last = 0, x = 2; x < max; x++) {
+ const bool slow = slow_is_prime_number(x);
+ const bool fast = is_prime_number(x);
+
+ KUNIT_ASSERT_EQ_MSG(test, slow, fast, "is-prime(%lu)", x);
+
+ if (!slow)
+ continue;
+
+ next = next_prime_number(last);
+ KUNIT_ASSERT_EQ_MSG(test, next, x, "next-prime(%lu)", last);
+ last = next;
+ }
+}
+
+static void kunit_suite_exit(struct kunit_suite *suite)
+{
+ with_primes(suite, dump_primes);
+}
+
+static struct kunit_case prime_numbers_cases[] = {
+ KUNIT_CASE(prime_numbers_test),
+ {},
+};
+
+static struct kunit_suite prime_numbers_suite = {
+ .name = "math-prime_numbers",
+ .suite_exit = kunit_suite_exit,
+ .test_cases = prime_numbers_cases,
+};
+
+kunit_test_suite(prime_numbers_suite);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Prime number library");
+MODULE_LICENSE("GPL");
diff --git a/lib/math/rational-test.c b/lib/math/tests/rational_kunit.c
index 47486a95f088..47486a95f088 100644
--- a/lib/math/rational-test.c
+++ b/lib/math/tests/rational_kunit.c
diff --git a/lib/net_utils.c b/lib/net_utils.c
index 42bb0473fb22..215cda672fee 100644
--- a/lib/net_utils.c
+++ b/lib/net_utils.c
@@ -7,11 +7,9 @@
bool mac_pton(const char *s, u8 *mac)
{
- size_t maxlen = 3 * ETH_ALEN - 1;
int i;
- /* XX:XX:XX:XX:XX:XX */
- if (strnlen(s, maxlen) < maxlen)
+ if (strnlen(s, MAC_ADDR_STR_LEN) < MAC_ADDR_STR_LEN)
return false;
/* Don't dirty result unless string is valid MAC. */
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index 65a75d58ed9e..c83829ef557f 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -100,34 +100,6 @@ __check_eq_pbl(const char *srcfile, unsigned int line,
return true;
}
-static bool __init
-__check_eq_u32_array(const char *srcfile, unsigned int line,
- const u32 *exp_arr, unsigned int exp_len,
- const u32 *arr, unsigned int len) __used;
-static bool __init
-__check_eq_u32_array(const char *srcfile, unsigned int line,
- const u32 *exp_arr, unsigned int exp_len,
- const u32 *arr, unsigned int len)
-{
- if (exp_len != len) {
- pr_warn("[%s:%u] array length differ: expected %u, got %u\n",
- srcfile, line,
- exp_len, len);
- return false;
- }
-
- if (memcmp(exp_arr, arr, len*sizeof(*arr))) {
- pr_warn("[%s:%u] array contents differ\n", srcfile, line);
- print_hex_dump(KERN_WARNING, " exp: ", DUMP_PREFIX_OFFSET,
- 32, 4, exp_arr, exp_len*sizeof(*exp_arr), false);
- print_hex_dump(KERN_WARNING, " got: ", DUMP_PREFIX_OFFSET,
- 32, 4, arr, len*sizeof(*arr), false);
- return false;
- }
-
- return true;
-}
-
static bool __init __check_eq_clump8(const char *srcfile, unsigned int line,
const unsigned int offset,
const unsigned int size,
diff --git a/lib/test_objpool.c b/lib/test_objpool.c
index 896c0131c9a8..8f688187fa87 100644
--- a/lib/test_objpool.c
+++ b/lib/test_objpool.c
@@ -190,8 +190,7 @@ static int ot_init_hrtimer(struct ot_item *item, unsigned long hrtimer)
return -ENOENT;
item->hrtcycle = ktime_set(0, hrtimer * 1000000UL);
- hrtimer_init(hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- hrt->function = ot_hrtimer_handler;
+ hrtimer_setup(hrt, ot_hrtimer_handler, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
return 0;
}
diff --git a/lib/test_ubsan.c b/lib/test_ubsan.c
index 5d7b10e98610..8772e5edaa4f 100644
--- a/lib/test_ubsan.c
+++ b/lib/test_ubsan.c
@@ -15,7 +15,7 @@ static void test_ubsan_add_overflow(void)
{
volatile int val = INT_MAX;
- UBSAN_TEST(CONFIG_UBSAN_SIGNED_WRAP);
+ UBSAN_TEST(CONFIG_UBSAN_INTEGER_WRAP);
val += 2;
}
@@ -24,7 +24,7 @@ static void test_ubsan_sub_overflow(void)
volatile int val = INT_MIN;
volatile int val2 = 2;
- UBSAN_TEST(CONFIG_UBSAN_SIGNED_WRAP);
+ UBSAN_TEST(CONFIG_UBSAN_INTEGER_WRAP);
val -= val2;
}
@@ -32,7 +32,7 @@ static void test_ubsan_mul_overflow(void)
{
volatile int val = INT_MAX / 2;
- UBSAN_TEST(CONFIG_UBSAN_SIGNED_WRAP);
+ UBSAN_TEST(CONFIG_UBSAN_INTEGER_WRAP);
val *= 3;
}
@@ -40,7 +40,7 @@ static void test_ubsan_negate_overflow(void)
{
volatile int val = INT_MIN;
- UBSAN_TEST(CONFIG_UBSAN_SIGNED_WRAP);
+ UBSAN_TEST(CONFIG_UBSAN_INTEGER_WRAP);
val = -val;
}
@@ -53,6 +53,15 @@ static void test_ubsan_divrem_overflow(void)
val /= val2;
}
+static void test_ubsan_truncate_signed(void)
+{
+ volatile long val = LONG_MAX;
+ volatile int val2 = 0;
+
+ UBSAN_TEST(CONFIG_UBSAN_INTEGER_WRAP);
+ val2 = val;
+}
+
static void test_ubsan_shift_out_of_bounds(void)
{
volatile int neg = -1, wrap = 4;
@@ -127,6 +136,7 @@ static const test_ubsan_fp test_ubsan_array[] = {
test_ubsan_sub_overflow,
test_ubsan_mul_overflow,
test_ubsan_negate_overflow,
+ test_ubsan_truncate_signed,
test_ubsan_shift_out_of_bounds,
test_ubsan_out_of_bounds,
test_ubsan_load_invalid_value,
diff --git a/lib/tests/Makefile b/lib/tests/Makefile
index 8e4f42cb9c54..a434c7cb733a 100644
--- a/lib/tests/Makefile
+++ b/lib/tests/Makefile
@@ -1 +1,45 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for tests of kernel library functions.
+
+# KUnit tests
+CFLAGS_bitfield_kunit.o := $(DISABLE_STRUCTLEAK_PLUGIN)
+obj-$(CONFIG_BITFIELD_KUNIT) += bitfield_kunit.o
+obj-$(CONFIG_BITS_TEST) += test_bits.o
+obj-$(CONFIG_BLACKHOLE_DEV_KUNIT_TEST) += blackhole_dev_kunit.o
+obj-$(CONFIG_CHECKSUM_KUNIT) += checksum_kunit.o
+obj-$(CONFIG_CMDLINE_KUNIT_TEST) += cmdline_kunit.o
+obj-$(CONFIG_CPUMASK_KUNIT_TEST) += cpumask_kunit.o
+obj-$(CONFIG_CRC_KUNIT_TEST) += crc_kunit.o
+CFLAGS_fortify_kunit.o += $(call cc-disable-warning, unsequenced)
+CFLAGS_fortify_kunit.o += $(call cc-disable-warning, stringop-overread)
+CFLAGS_fortify_kunit.o += $(call cc-disable-warning, stringop-truncation)
+CFLAGS_fortify_kunit.o += $(DISABLE_STRUCTLEAK_PLUGIN)
+obj-$(CONFIG_FORTIFY_KUNIT_TEST) += fortify_kunit.o
+CFLAGS_test_fprobe.o += $(CC_FLAGS_FTRACE)
+obj-$(CONFIG_FPROBE_SANITY_TEST) += test_fprobe.o
+obj-$(CONFIG_HASHTABLE_KUNIT_TEST) += hashtable_test.o
+obj-$(CONFIG_HASH_KUNIT_TEST) += test_hash.o
+obj-$(CONFIG_TEST_IOV_ITER) += kunit_iov_iter.o
+obj-$(CONFIG_IS_SIGNED_TYPE_KUNIT_TEST) += is_signed_type_kunit.o
+obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o
+obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o
+obj-$(CONFIG_KFIFO_KUNIT_TEST) += kfifo_kunit.o
+obj-$(CONFIG_TEST_LIST_SORT) += test_list_sort.o
+obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o
+obj-$(CONFIG_MEMCPY_KUNIT_TEST) += memcpy_kunit.o
+CFLAGS_overflow_kunit.o = $(call cc-disable-warning, tautological-constant-out-of-range-compare)
+obj-$(CONFIG_OVERFLOW_KUNIT_TEST) += overflow_kunit.o
+obj-$(CONFIG_PRINTF_KUNIT_TEST) += printf_kunit.o
+obj-$(CONFIG_SCANF_KUNIT_TEST) += scanf_kunit.o
+obj-$(CONFIG_SIPHASH_KUNIT_TEST) += siphash_kunit.o
+obj-$(CONFIG_SLUB_KUNIT_TEST) += slub_kunit.o
+obj-$(CONFIG_TEST_SORT) += test_sort.o
+CFLAGS_stackinit_kunit.o += $(call cc-disable-warning, switch-unreachable)
+obj-$(CONFIG_STACKINIT_KUNIT_TEST) += stackinit_kunit.o
+obj-$(CONFIG_STRING_KUNIT_TEST) += string_kunit.o
+obj-$(CONFIG_STRING_HELPERS_KUNIT_TEST) += string_helpers_kunit.o
+obj-$(CONFIG_USERCOPY_KUNIT_TEST) += usercopy_kunit.o
+obj-$(CONFIG_UTIL_MACROS_KUNIT) += util_macros_kunit.o
+
obj-$(CONFIG_TEST_RUNTIME_MODULE) += module/
diff --git a/lib/bitfield_kunit.c b/lib/tests/bitfield_kunit.c
index 5ccd86f61896..5ccd86f61896 100644
--- a/lib/bitfield_kunit.c
+++ b/lib/tests/bitfield_kunit.c
diff --git a/lib/test_blackhole_dev.c b/lib/tests/blackhole_dev_kunit.c
index ec290ac2a0d9..06834ab35f43 100644
--- a/lib/test_blackhole_dev.c
+++ b/lib/tests/blackhole_dev_kunit.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * This module tests the blackhole_dev that is created during the
+ * This tests the blackhole_dev that is created during the
* net subsystem initialization. The test this module performs is
* by injecting an skb into the stack with skb->dev as the
* blackhole_dev and expects kernel to behave in a sane manner
@@ -9,9 +9,8 @@
* Copyright (c) 2018, Mahesh Bandewar <maheshb@google.com>
*/
-#include <linux/init.h>
+#include <kunit/test.h>
#include <linux/module.h>
-#include <linux/printk.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/udp.h>
@@ -25,17 +24,15 @@
#define UDP_PORT 1234
-static int __init test_blackholedev_init(void)
+static void test_blackholedev(struct kunit *test)
{
struct ipv6hdr *ip6h;
struct sk_buff *skb;
struct udphdr *uh;
int data_len;
- int ret;
skb = alloc_skb(SKB_SIZE, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
+ KUNIT_ASSERT_NOT_NULL(test, skb);
/* Reserve head-room for the headers */
skb_reserve(skb, HEAD_SIZE);
@@ -55,7 +52,7 @@ static int __init test_blackholedev_init(void)
ip6h = (struct ipv6hdr *)skb_push(skb, sizeof(struct ipv6hdr));
skb_set_network_header(skb, 0);
ip6h->hop_limit = 32;
- ip6h->payload_len = data_len + sizeof(struct udphdr);
+ ip6h->payload_len = htons(data_len + sizeof(struct udphdr));
ip6h->nexthdr = IPPROTO_UDP;
ip6h->saddr = in6addr_loopback;
ip6h->daddr = in6addr_loopback;
@@ -68,32 +65,20 @@ static int __init test_blackholedev_init(void)
skb->dev = blackhole_netdev;
/* Now attempt to send the packet */
- ret = dev_queue_xmit(skb);
-
- switch (ret) {
- case NET_XMIT_SUCCESS:
- pr_warn("dev_queue_xmit() returned NET_XMIT_SUCCESS\n");
- break;
- case NET_XMIT_DROP:
- pr_warn("dev_queue_xmit() returned NET_XMIT_DROP\n");
- break;
- case NET_XMIT_CN:
- pr_warn("dev_queue_xmit() returned NET_XMIT_CN\n");
- break;
- default:
- pr_err("dev_queue_xmit() returned UNKNOWN(%d)\n", ret);
- }
-
- return 0;
+ KUNIT_EXPECT_EQ(test, dev_queue_xmit(skb), NET_XMIT_SUCCESS);
}
-static void __exit test_blackholedev_exit(void)
-{
- pr_warn("test_blackholedev module terminating.\n");
-}
+static struct kunit_case blackholedev_cases[] = {
+ KUNIT_CASE(test_blackholedev),
+ {},
+};
+
+static struct kunit_suite blackholedev_suite = {
+ .name = "blackholedev",
+ .test_cases = blackholedev_cases,
+};
-module_init(test_blackholedev_init);
-module_exit(test_blackholedev_exit);
+kunit_test_suite(blackholedev_suite);
MODULE_AUTHOR("Mahesh Bandewar <maheshb@google.com>");
MODULE_DESCRIPTION("module test of the blackhole_dev");
diff --git a/lib/checksum_kunit.c b/lib/tests/checksum_kunit.c
index be04aa42125c..be04aa42125c 100644
--- a/lib/checksum_kunit.c
+++ b/lib/tests/checksum_kunit.c
diff --git a/lib/cmdline_kunit.c b/lib/tests/cmdline_kunit.c
index c1602f797637..c1602f797637 100644
--- a/lib/cmdline_kunit.c
+++ b/lib/tests/cmdline_kunit.c
diff --git a/lib/cpumask_kunit.c b/lib/tests/cpumask_kunit.c
index 6b62a6bdd50e..6b62a6bdd50e 100644
--- a/lib/cpumask_kunit.c
+++ b/lib/tests/cpumask_kunit.c
diff --git a/lib/crc_kunit.c b/lib/tests/crc_kunit.c
index 6a61d4b5fd45..585c48b65cef 100644
--- a/lib/crc_kunit.c
+++ b/lib/tests/crc_kunit.c
@@ -7,6 +7,7 @@
* Author: Eric Biggers <ebiggers@google.com>
*/
#include <kunit/test.h>
+#include <linux/crc7.h>
#include <linux/crc16.h>
#include <linux/crc-t10dif.h>
#include <linux/crc32.h>
@@ -32,7 +33,9 @@ static size_t test_buflen;
* @poly: The generator polynomial with the highest-order term omitted.
* Bit-reversed if @le is true.
* @func: The function to compute a CRC. The type signature uses u64 so that it
- * can fit any CRC up to CRC-64.
+ * can fit any CRC up to CRC-64. The CRC is passed in, and is expected
+ * to be returned in, the least significant bits of the u64. The
+ * function is expected to *not* invert the CRC at the beginning and end.
* @combine_func: Optional function to combine two CRCs.
*/
struct crc_variant {
@@ -223,8 +226,9 @@ crc_benchmark(struct kunit *test,
};
size_t len, i, j, num_iters;
/*
- * Some of the CRC library functions are marked as __pure, so use
- * volatile to ensure that all calls are really made as intended.
+ * The CRC value that this function computes in a series of calls to
+ * crc_func is never actually used, so use volatile to ensure that the
+ * computations are done as intended and don't all get optimized out.
*/
volatile u64 crc = 0;
u64 t;
@@ -251,6 +255,33 @@ crc_benchmark(struct kunit *test,
}
}
+/* crc7_be */
+
+static u64 crc7_be_wrapper(u64 crc, const u8 *p, size_t len)
+{
+ /*
+ * crc7_be() left-aligns the 7-bit CRC in a u8, whereas the test wants a
+ * right-aligned CRC (in a u64). Convert between the conventions.
+ */
+ return crc7_be(crc << 1, p, len) >> 1;
+}
+
+static const struct crc_variant crc_variant_crc7_be = {
+ .bits = 7,
+ .poly = 0x9,
+ .func = crc7_be_wrapper,
+};
+
+static void crc7_be_test(struct kunit *test)
+{
+ crc_test(test, &crc_variant_crc7_be);
+}
+
+static void crc7_be_benchmark(struct kunit *test)
+{
+ crc_benchmark(test, crc7_be_wrapper);
+}
+
/* crc16 */
static u64 crc16_wrapper(u64 crc, const u8 *p, size_t len)
@@ -362,7 +393,7 @@ static u64 crc32c_wrapper(u64 crc, const u8 *p, size_t len)
static u64 crc32c_combine_wrapper(u64 crc1, u64 crc2, size_t len2)
{
- return __crc32c_le_combine(crc1, crc2, len2);
+ return crc32c_combine(crc1, crc2, len2);
}
static const struct crc_variant crc_variant_crc32c = {
@@ -407,7 +438,34 @@ static void crc64_be_benchmark(struct kunit *test)
crc_benchmark(test, crc64_be_wrapper);
}
+/* crc64_nvme */
+
+static u64 crc64_nvme_wrapper(u64 crc, const u8 *p, size_t len)
+{
+ /* The inversions that crc64_nvme() does have to be undone here. */
+ return ~crc64_nvme(~crc, p, len);
+}
+
+static const struct crc_variant crc_variant_crc64_nvme = {
+ .bits = 64,
+ .le = true,
+ .poly = 0x9a6c9329ac4bc9b5,
+ .func = crc64_nvme_wrapper,
+};
+
+static void crc64_nvme_test(struct kunit *test)
+{
+ crc_test(test, &crc_variant_crc64_nvme);
+}
+
+static void crc64_nvme_benchmark(struct kunit *test)
+{
+ crc_benchmark(test, crc64_nvme_wrapper);
+}
+
static struct kunit_case crc_test_cases[] = {
+ KUNIT_CASE(crc7_be_test),
+ KUNIT_CASE(crc7_be_benchmark),
KUNIT_CASE(crc16_test),
KUNIT_CASE(crc16_benchmark),
KUNIT_CASE(crc_t10dif_test),
@@ -420,6 +478,8 @@ static struct kunit_case crc_test_cases[] = {
KUNIT_CASE(crc32c_benchmark),
KUNIT_CASE(crc64_be_test),
KUNIT_CASE(crc64_be_benchmark),
+ KUNIT_CASE(crc64_nvme_test),
+ KUNIT_CASE(crc64_nvme_benchmark),
{},
};
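
The kdoc update above pins down the wrapper convention: the CRC travels right-aligned in the low bits of a u64, and the wrappers undo any built-in inversion (crc64_nvme_wrapper()) or realignment (crc7_be_wrapper()). For the reflected ("le") variants, where @poly is the bit-reversed generator, the implied reference computation is presumably the ordinary LSB-first bit-serial CRC; a standalone sketch of that, not taken from crc_kunit.c itself:

#include <linux/types.h>

/*
 * LSB-first bit-serial CRC in the wrapper convention: value right-aligned in
 * a u64, poly bit-reversed, no inversion at the start or end.
 */
static u64 crc_ref_le(u64 crc, u64 poly, const u8 *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc & 1) ? (crc >> 1) ^ poly : crc >> 1;
	}
	return crc;
}

With the bit-reversed CRC-32C polynomial 0x82f63b78, for instance, this computes the plain, uninverted CRC-32C of the buffer.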
diff --git a/lib/fortify_kunit.c b/lib/tests/fortify_kunit.c
index ecb638d4cde1..29ffc62a71e3 100644
--- a/lib/fortify_kunit.c
+++ b/lib/tests/fortify_kunit.c
@@ -60,6 +60,7 @@ static int fortify_write_overflows;
static const char array_of_10[] = "this is 10";
static const char *ptr_of_11 = "this is 11!";
+static const char * const unchanging_12 = "this is 12!!";
static char array_unknown[] = "compiler thinks I might change";
void fortify_add_kunit_error(int write)
@@ -83,12 +84,28 @@ void fortify_add_kunit_error(int write)
static void fortify_test_known_sizes(struct kunit *test)
{
+ char stack[80] = "Test!";
+
+ KUNIT_EXPECT_FALSE(test, __is_constexpr(__builtin_strlen(stack)));
+ KUNIT_EXPECT_EQ(test, __compiletime_strlen(stack), 5);
+
+ KUNIT_EXPECT_TRUE(test, __is_constexpr(__builtin_strlen("88888888")));
KUNIT_EXPECT_EQ(test, __compiletime_strlen("88888888"), 8);
+
+ KUNIT_EXPECT_TRUE(test, __is_constexpr(__builtin_strlen(array_of_10)));
KUNIT_EXPECT_EQ(test, __compiletime_strlen(array_of_10), 10);
+
+ KUNIT_EXPECT_FALSE(test, __is_constexpr(__builtin_strlen(ptr_of_11)));
KUNIT_EXPECT_EQ(test, __compiletime_strlen(ptr_of_11), 11);
+ KUNIT_EXPECT_TRUE(test, __is_constexpr(__builtin_strlen(unchanging_12)));
+ KUNIT_EXPECT_EQ(test, __compiletime_strlen(unchanging_12), 12);
+
+ KUNIT_EXPECT_FALSE(test, __is_constexpr(__builtin_strlen(array_unknown)));
KUNIT_EXPECT_EQ(test, __compiletime_strlen(array_unknown), SIZE_MAX);
+
/* Externally defined and dynamically sized string pointer: */
+ KUNIT_EXPECT_FALSE(test, __is_constexpr(__builtin_strlen(test->name)));
KUNIT_EXPECT_EQ(test, __compiletime_strlen(test->name), SIZE_MAX);
}
@@ -394,8 +411,6 @@ struct fortify_padding {
char buf[32];
unsigned long bytes_after;
};
-/* Force compiler into not being able to resolve size at compile-time. */
-static volatile int unconst;
static void fortify_test_strlen(struct kunit *test)
{
@@ -520,57 +535,56 @@ static void fortify_test_strncpy(struct kunit *test)
{
struct fortify_padding pad = { };
char src[] = "Copy me fully into a small buffer and I will overflow!";
+ size_t sizeof_buf = sizeof(pad.buf);
+
+ OPTIMIZER_HIDE_VAR(sizeof_buf);
/* Destination is %NUL-filled to start with. */
KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
- KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
- KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
- KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 2], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 3], '\0');
KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
/* Legitimate strncpy() 1 less than of max size. */
- KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
- sizeof(pad.buf) + unconst - 1)
+ KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src, sizeof_buf - 1)
== pad.buf);
KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
/* Only last byte should be %NUL */
- KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 3], '\0');
/* Legitimate (though unterminated) max-size strncpy. */
- KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
- sizeof(pad.buf) + unconst)
+ KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src, sizeof_buf)
== pad.buf);
KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
/* No trailing %NUL -- thanks strncpy API. */
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
/* But we will not have gone beyond. */
KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
/* Now verify that FORTIFY is working... */
- KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
- sizeof(pad.buf) + unconst + 1)
+ KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src, sizeof_buf + 1)
== pad.buf);
/* Should catch the overflow. */
KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
/* And we will not have gone beyond. */
KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
/* And further... */
- KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
- sizeof(pad.buf) + unconst + 2)
+ KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src, sizeof_buf + 2)
== pad.buf);
/* Should catch the overflow. */
KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
/* And we will not have gone beyond. */
KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
}
@@ -579,55 +593,56 @@ static void fortify_test_strscpy(struct kunit *test)
{
struct fortify_padding pad = { };
char src[] = "Copy me fully into a small buffer and I will overflow!";
+ size_t sizeof_buf = sizeof(pad.buf);
+ size_t sizeof_src = sizeof(src);
+
+ OPTIMIZER_HIDE_VAR(sizeof_buf);
+ OPTIMIZER_HIDE_VAR(sizeof_src);
/* Destination is %NUL-filled to start with. */
KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
- KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
- KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
- KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 2], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 3], '\0');
KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
/* Legitimate strscpy() 1 less than of max size. */
- KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
- sizeof(pad.buf) + unconst - 1),
+ KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src, sizeof_buf - 1),
-E2BIG);
KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
/* Keeping space for %NUL, last two bytes should be %NUL */
- KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
- KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 3], '\0');
/* Legitimate max-size strscpy. */
- KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
- sizeof(pad.buf) + unconst),
+ KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src, sizeof_buf),
-E2BIG);
KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
/* A trailing %NUL will exist. */
- KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
/* Now verify that FORTIFY is working... */
- KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
- sizeof(pad.buf) + unconst + 1),
+ KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src, sizeof_buf + 1),
-E2BIG);
/* Should catch the overflow. */
KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
- KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
/* And we will not have gone beyond. */
KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
/* And much further... */
- KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
- sizeof(src) * 2 + unconst),
+ KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src, sizeof_src * 2),
-E2BIG);
/* Should catch the overflow. */
KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
- KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
/* And we will not have gone beyond. */
KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
}
@@ -767,7 +782,9 @@ static void fortify_test_strlcat(struct kunit *test)
struct fortify_padding pad = { };
char src[sizeof(pad.buf)] = { };
int i, partial;
- int len = sizeof(pad.buf) + unconst;
+ int len = sizeof(pad.buf);
+
+ OPTIMIZER_HIDE_VAR(len);
/* Fill 15 bytes with valid characters. */
partial = sizeof(src) / 2 - 1;
@@ -857,28 +874,32 @@ struct fortify_zero_sized {
#define __fortify_test(memfunc) \
static void fortify_test_##memfunc(struct kunit *test) \
{ \
- struct fortify_zero_sized zero = { }; \
+ struct fortify_zero_sized empty = { }; \
struct fortify_padding pad = { }; \
char srcA[sizeof(pad.buf) + 2]; \
char srcB[sizeof(pad.buf) + 2]; \
- size_t len = sizeof(pad.buf) + unconst; \
+ size_t len = sizeof(pad.buf); \
+ size_t zero = 0; \
+ \
+ OPTIMIZER_HIDE_VAR(len); \
+ OPTIMIZER_HIDE_VAR(zero); \
\
memset(srcA, 'A', sizeof(srcA)); \
KUNIT_ASSERT_EQ(test, srcA[0], 'A'); \
memset(srcB, 'B', sizeof(srcB)); \
KUNIT_ASSERT_EQ(test, srcB[0], 'B'); \
\
- memfunc(pad.buf, srcA, 0 + unconst); \
+ memfunc(pad.buf, srcA, zero); \
KUNIT_EXPECT_EQ(test, pad.buf[0], '\0'); \
KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
- memfunc(pad.buf + 1, srcB, 1 + unconst); \
+ memfunc(pad.buf + 1, srcB, zero + 1); \
KUNIT_EXPECT_EQ(test, pad.buf[0], '\0'); \
KUNIT_EXPECT_EQ(test, pad.buf[1], 'B'); \
KUNIT_EXPECT_EQ(test, pad.buf[2], '\0'); \
KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
- memfunc(pad.buf, srcA, 1 + unconst); \
+ memfunc(pad.buf, srcA, zero + 1); \
KUNIT_EXPECT_EQ(test, pad.buf[0], 'A'); \
KUNIT_EXPECT_EQ(test, pad.buf[1], 'B'); \
KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
@@ -904,10 +925,10 @@ static void fortify_test_##memfunc(struct kunit *test) \
/* Reset error counter. */ \
fortify_write_overflows = 0; \
/* Copy nothing into nothing: no errors. */ \
- memfunc(zero.buf, srcB, 0 + unconst); \
+ memfunc(empty.buf, srcB, zero); \
KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
- memfunc(zero.buf, srcB, 1 + unconst); \
+ memfunc(empty.buf, srcB, zero + 1); \
KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1); \
}
@@ -919,7 +940,9 @@ static void fortify_test_memscan(struct kunit *test)
char haystack[] = "Where oh where is my memory range?";
char *mem = haystack + strlen("Where oh where is ");
char needle = 'm';
- size_t len = sizeof(haystack) + unconst;
+ size_t len = sizeof(haystack);
+
+ OPTIMIZER_HIDE_VAR(len);
KUNIT_ASSERT_PTR_EQ(test, memscan(haystack, needle, len),
mem);
@@ -938,7 +961,9 @@ static void fortify_test_memchr(struct kunit *test)
char haystack[] = "Where oh where is my memory range?";
char *mem = haystack + strlen("Where oh where is ");
char needle = 'm';
- size_t len = sizeof(haystack) + unconst;
+ size_t len = sizeof(haystack);
+
+ OPTIMIZER_HIDE_VAR(len);
KUNIT_ASSERT_PTR_EQ(test, memchr(haystack, needle, len),
mem);
@@ -957,7 +982,9 @@ static void fortify_test_memchr_inv(struct kunit *test)
char haystack[] = "Where oh where is my memory range?";
char *mem = haystack + 1;
char needle = 'W';
- size_t len = sizeof(haystack) + unconst;
+ size_t len = sizeof(haystack);
+
+ OPTIMIZER_HIDE_VAR(len);
/* Normal search is okay. */
KUNIT_ASSERT_PTR_EQ(test, memchr_inv(haystack, needle, len),
@@ -976,8 +1003,11 @@ static void fortify_test_memcmp(struct kunit *test)
{
char one[] = "My mind is going ...";
char two[] = "My mind is going ... I can feel it.";
- size_t one_len = sizeof(one) + unconst - 1;
- size_t two_len = sizeof(two) + unconst - 1;
+ size_t one_len = sizeof(one) - 1;
+ size_t two_len = sizeof(two) - 1;
+
+ OPTIMIZER_HIDE_VAR(one_len);
+ OPTIMIZER_HIDE_VAR(two_len);
/* We match the first string (ignoring the %NUL). */
KUNIT_ASSERT_EQ(test, memcmp(one, two, one_len), 0);
@@ -998,7 +1028,9 @@ static void fortify_test_kmemdup(struct kunit *test)
{
char src[] = "I got Doom running on it!";
char *copy;
- size_t len = sizeof(src) + unconst;
+ size_t len = sizeof(src);
+
+ OPTIMIZER_HIDE_VAR(len);
/* Copy is within bounds. */
copy = kmemdup(src, len, GFP_KERNEL);
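
Throughout the fortify hunks the shared unconst volatile is replaced by per-test local copies passed through OPTIMIZER_HIDE_VAR(), which launders the value through an empty asm so the compiler can no longer fold it as a constant; the deliberately overlong lengths then exercise FORTIFY's run-time checks rather than being resolved (or warned about) at compile time. A rough standalone illustration of the idea, using a stand-in macro rather than the kernel's exact definition:

#include <linux/string.h>
#include <linux/types.h>

/* Illustrative stand-in for OPTIMIZER_HIDE_VAR(), not its exact definition. */
#define HIDE_VAR(var)	__asm__ volatile("" : "+r" (var))

static void example(char *dst, const char *src)
{
	size_t len = 16;	/* a compile-time constant ...            */

	HIDE_VAR(len);		/* ... now opaque to constant folding     */
	memcpy(dst, src, len);	/* so the size check happens at run time  */
}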
diff --git a/lib/hashtable_test.c b/lib/tests/hashtable_test.c
index 3521de6bad15..3521de6bad15 100644
--- a/lib/hashtable_test.c
+++ b/lib/tests/hashtable_test.c
diff --git a/lib/is_signed_type_kunit.c b/lib/tests/is_signed_type_kunit.c
index 88adbe813f3a..88adbe813f3a 100644
--- a/lib/is_signed_type_kunit.c
+++ b/lib/tests/is_signed_type_kunit.c
diff --git a/lib/tests/kfifo_kunit.c b/lib/tests/kfifo_kunit.c
new file mode 100644
index 000000000000..a85eedc3195a
--- /dev/null
+++ b/lib/tests/kfifo_kunit.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test for the generic kernel FIFO implementation.
+ *
+ * Copyright (C) 2024 Diego Vieira <diego.daniel.professional@gmail.com>
+ */
+#include <kunit/test.h>
+
+#include <linux/kfifo.h>
+
+#define KFIFO_SIZE 32
+#define N_ELEMENTS 5
+
+static void kfifo_test_reset_should_clear_the_fifo(struct kunit *test)
+{
+ DEFINE_KFIFO(my_fifo, u8, KFIFO_SIZE);
+
+ kfifo_put(&my_fifo, 1);
+ kfifo_put(&my_fifo, 2);
+ kfifo_put(&my_fifo, 3);
+ KUNIT_EXPECT_EQ(test, kfifo_len(&my_fifo), 3);
+
+ kfifo_reset(&my_fifo);
+
+ KUNIT_EXPECT_EQ(test, kfifo_len(&my_fifo), 0);
+ KUNIT_EXPECT_TRUE(test, kfifo_is_empty(&my_fifo));
+}
+
+static void kfifo_test_define_should_define_an_empty_fifo(struct kunit *test)
+{
+ DEFINE_KFIFO(my_fifo, u8, KFIFO_SIZE);
+
+ KUNIT_EXPECT_TRUE(test, kfifo_initialized(&my_fifo));
+ KUNIT_EXPECT_TRUE(test, kfifo_is_empty(&my_fifo));
+ KUNIT_EXPECT_EQ(test, kfifo_len(&my_fifo), 0);
+}
+
+static void kfifo_test_len_should_ret_n_of_stored_elements(struct kunit *test)
+{
+ u8 buffer1[N_ELEMENTS];
+
+ for (int i = 0; i < N_ELEMENTS; i++)
+ buffer1[i] = i + 1;
+
+ DEFINE_KFIFO(my_fifo, u8, KFIFO_SIZE);
+
+ KUNIT_EXPECT_EQ(test, kfifo_len(&my_fifo), 0);
+
+ kfifo_in(&my_fifo, buffer1, N_ELEMENTS);
+ KUNIT_EXPECT_EQ(test, kfifo_len(&my_fifo), N_ELEMENTS);
+
+ kfifo_in(&my_fifo, buffer1, N_ELEMENTS);
+ KUNIT_EXPECT_EQ(test, kfifo_len(&my_fifo), N_ELEMENTS * 2);
+
+ kfifo_reset(&my_fifo);
+ KUNIT_EXPECT_EQ(test, kfifo_len(&my_fifo), 0);
+}
+
+static void kfifo_test_put_should_insert_and_get_should_pop(struct kunit *test)
+{
+ u8 out_data = 0;
+ int processed_elements;
+ u8 elements[] = { 3, 5, 11 };
+
+ DEFINE_KFIFO(my_fifo, u8, KFIFO_SIZE);
+
+ // If the fifo is empty, get returns 0
+ processed_elements = kfifo_get(&my_fifo, &out_data);
+ KUNIT_EXPECT_EQ(test, processed_elements, 0);
+ KUNIT_EXPECT_EQ(test, out_data, 0);
+
+ for (int i = 0; i < 3; i++)
+ kfifo_put(&my_fifo, elements[i]);
+
+ for (int i = 0; i < 3; i++) {
+ processed_elements = kfifo_get(&my_fifo, &out_data);
+ KUNIT_EXPECT_EQ(test, processed_elements, 1);
+ KUNIT_EXPECT_EQ(test, out_data, elements[i]);
+ }
+}
+
+static void kfifo_test_in_should_insert_multiple_elements(struct kunit *test)
+{
+ u8 in_buffer[] = { 11, 25, 65 };
+ u8 out_data;
+ int processed_elements;
+
+ DEFINE_KFIFO(my_fifo, u8, KFIFO_SIZE);
+
+ kfifo_in(&my_fifo, in_buffer, 3);
+
+ for (int i = 0; i < 3; i++) {
+ processed_elements = kfifo_get(&my_fifo, &out_data);
+ KUNIT_EXPECT_EQ(test, processed_elements, 1);
+ KUNIT_EXPECT_EQ(test, out_data, in_buffer[i]);
+ }
+}
+
+static void kfifo_test_out_should_pop_multiple_elements(struct kunit *test)
+{
+ u8 in_buffer[] = { 11, 25, 65 };
+ u8 out_buffer[3];
+ int copied_elements;
+
+ DEFINE_KFIFO(my_fifo, u8, KFIFO_SIZE);
+
+ for (int i = 0; i < 3; i++)
+ kfifo_put(&my_fifo, in_buffer[i]);
+
+ copied_elements = kfifo_out(&my_fifo, out_buffer, 3);
+ KUNIT_EXPECT_EQ(test, copied_elements, 3);
+
+ for (int i = 0; i < 3; i++)
+ KUNIT_EXPECT_EQ(test, out_buffer[i], in_buffer[i]);
+ KUNIT_EXPECT_TRUE(test, kfifo_is_empty(&my_fifo));
+}
+
+static void kfifo_test_dec_init_should_define_an_empty_fifo(struct kunit *test)
+{
+ DECLARE_KFIFO(my_fifo, u8, KFIFO_SIZE);
+
+ INIT_KFIFO(my_fifo);
+
+	// my_fifo is a struct with an in-place buffer
+ KUNIT_EXPECT_FALSE(test, __is_kfifo_ptr(&my_fifo));
+
+ KUNIT_EXPECT_TRUE(test, kfifo_initialized(&my_fifo));
+}
+
+static void kfifo_test_define_should_equal_declare_init(struct kunit *test)
+{
+	// declare a kfifo variable (my_fifo1) holding u8 elements
+	DECLARE_KFIFO(my_fifo1, u8, KFIFO_SIZE);
+	// initialize the declared fifo
+	INIT_KFIFO(my_fifo1);
+
+	// DEFINE_KFIFO declares and initializes the variable in one step,
+	// essentially the same as calling DECLARE_KFIFO followed by INIT_KFIFO
+ DEFINE_KFIFO(my_fifo2, u8, KFIFO_SIZE);
+
+ // my_fifo1 and my_fifo2 have the same size
+ KUNIT_EXPECT_EQ(test, sizeof(my_fifo1), sizeof(my_fifo2));
+ KUNIT_EXPECT_EQ(test, kfifo_initialized(&my_fifo1),
+ kfifo_initialized(&my_fifo2));
+ KUNIT_EXPECT_EQ(test, kfifo_is_empty(&my_fifo1),
+ kfifo_is_empty(&my_fifo2));
+}
+
+static void kfifo_test_alloc_should_initiliaze_a_ptr_fifo(struct kunit *test)
+{
+ int ret;
+ DECLARE_KFIFO_PTR(my_fifo, u8);
+
+ INIT_KFIFO(my_fifo);
+
+ // kfifo_initialized returns false signaling the buffer pointer is NULL
+ KUNIT_EXPECT_FALSE(test, kfifo_initialized(&my_fifo));
+
+ // kfifo_alloc allocates the buffer
+ ret = kfifo_alloc(&my_fifo, KFIFO_SIZE, GFP_KERNEL);
+ KUNIT_EXPECT_EQ_MSG(test, ret, 0, "Memory allocation should succeed");
+ KUNIT_EXPECT_TRUE(test, kfifo_initialized(&my_fifo));
+
+ // kfifo_free frees the buffer
+ kfifo_free(&my_fifo);
+}
+
+static void kfifo_test_peek_should_not_remove_elements(struct kunit *test)
+{
+ u8 out_data;
+ int processed_elements;
+
+ DEFINE_KFIFO(my_fifo, u8, KFIFO_SIZE);
+
+ // If the fifo is empty, peek returns 0
+ processed_elements = kfifo_peek(&my_fifo, &out_data);
+ KUNIT_EXPECT_EQ(test, processed_elements, 0);
+
+ kfifo_put(&my_fifo, 3);
+ kfifo_put(&my_fifo, 5);
+ kfifo_put(&my_fifo, 11);
+
+ KUNIT_EXPECT_EQ(test, kfifo_len(&my_fifo), 3);
+
+ processed_elements = kfifo_peek(&my_fifo, &out_data);
+ KUNIT_EXPECT_EQ(test, processed_elements, 1);
+ KUNIT_EXPECT_EQ(test, out_data, 3);
+
+ KUNIT_EXPECT_EQ(test, kfifo_len(&my_fifo), 3);
+
+	// Using peek doesn't remove the element,
+	// so the read element and the fifo length
+	// remain the same
+ processed_elements = kfifo_peek(&my_fifo, &out_data);
+ KUNIT_EXPECT_EQ(test, processed_elements, 1);
+ KUNIT_EXPECT_EQ(test, out_data, 3);
+
+ KUNIT_EXPECT_EQ(test, kfifo_len(&my_fifo), 3);
+}
+
+static struct kunit_case kfifo_test_cases[] = {
+ KUNIT_CASE(kfifo_test_reset_should_clear_the_fifo),
+ KUNIT_CASE(kfifo_test_define_should_define_an_empty_fifo),
+ KUNIT_CASE(kfifo_test_len_should_ret_n_of_stored_elements),
+ KUNIT_CASE(kfifo_test_put_should_insert_and_get_should_pop),
+ KUNIT_CASE(kfifo_test_in_should_insert_multiple_elements),
+ KUNIT_CASE(kfifo_test_out_should_pop_multiple_elements),
+ KUNIT_CASE(kfifo_test_dec_init_should_define_an_empty_fifo),
+ KUNIT_CASE(kfifo_test_define_should_equal_declare_init),
+ KUNIT_CASE(kfifo_test_alloc_should_initiliaze_a_ptr_fifo),
+ KUNIT_CASE(kfifo_test_peek_should_not_remove_elements),
+ {},
+};
+
+static struct kunit_suite kfifo_test_module = {
+ .name = "kfifo",
+ .test_cases = kfifo_test_cases,
+};
+
+kunit_test_suites(&kfifo_test_module);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Diego Vieira <diego.daniel.professional@gmail.com>");
+MODULE_DESCRIPTION("KUnit test for the kernel FIFO");
diff --git a/lib/kunit_iov_iter.c b/lib/tests/kunit_iov_iter.c
index 48342736d016..48342736d016 100644
--- a/lib/kunit_iov_iter.c
+++ b/lib/tests/kunit_iov_iter.c
diff --git a/lib/list-test.c b/lib/tests/list-test.c
index 9135cdc1bb39..9135cdc1bb39 100644
--- a/lib/list-test.c
+++ b/lib/tests/list-test.c
diff --git a/lib/memcpy_kunit.c b/lib/tests/memcpy_kunit.c
index d36933554e46..d36933554e46 100644
--- a/lib/memcpy_kunit.c
+++ b/lib/tests/memcpy_kunit.c
diff --git a/lib/overflow_kunit.c b/lib/tests/overflow_kunit.c
index 5222c6393f11..894691b4411a 100644
--- a/lib/overflow_kunit.c
+++ b/lib/tests/overflow_kunit.c
@@ -1185,22 +1185,40 @@ struct bar {
static void DEFINE_FLEX_test(struct kunit *test)
{
- /* Using _RAW_ on a __counted_by struct will initialize "counter" to zero */
- DEFINE_RAW_FLEX(struct foo, two_but_zero, array, 2);
-#ifdef CONFIG_CC_HAS_COUNTED_BY
- int expected_raw_size = sizeof(struct foo);
-#else
- int expected_raw_size = sizeof(struct foo) + 2 * sizeof(s16);
-#endif
- /* Without annotation, it will always be on-stack size. */
DEFINE_RAW_FLEX(struct bar, two, array, 2);
DEFINE_FLEX(struct foo, eight, array, counter, 8);
DEFINE_FLEX(struct foo, empty, array, counter, 0);
+ /* Using _RAW_ on a __counted_by struct will initialize "counter" to zero */
+ DEFINE_RAW_FLEX(struct foo, two_but_zero, array, 2);
+ int array_size_override = 0;
- KUNIT_EXPECT_EQ(test, __struct_size(two_but_zero), expected_raw_size);
+ KUNIT_EXPECT_EQ(test, sizeof(*two), sizeof(struct bar));
KUNIT_EXPECT_EQ(test, __struct_size(two), sizeof(struct bar) + 2 * sizeof(s16));
- KUNIT_EXPECT_EQ(test, __struct_size(eight), 24);
+ KUNIT_EXPECT_EQ(test, __member_size(two), sizeof(struct bar) + 2 * sizeof(s16));
+ KUNIT_EXPECT_EQ(test, __struct_size(two->array), 2 * sizeof(s16));
+ KUNIT_EXPECT_EQ(test, __member_size(two->array), 2 * sizeof(s16));
+
+ KUNIT_EXPECT_EQ(test, sizeof(*eight), sizeof(struct foo));
+ KUNIT_EXPECT_EQ(test, __struct_size(eight), sizeof(struct foo) + 8 * sizeof(s16));
+ KUNIT_EXPECT_EQ(test, __member_size(eight), sizeof(struct foo) + 8 * sizeof(s16));
+ KUNIT_EXPECT_EQ(test, __struct_size(eight->array), 8 * sizeof(s16));
+ KUNIT_EXPECT_EQ(test, __member_size(eight->array), 8 * sizeof(s16));
+
+ KUNIT_EXPECT_EQ(test, sizeof(*empty), sizeof(struct foo));
KUNIT_EXPECT_EQ(test, __struct_size(empty), sizeof(struct foo));
+ KUNIT_EXPECT_EQ(test, __member_size(empty), sizeof(struct foo));
+ KUNIT_EXPECT_EQ(test, __struct_size(empty->array), 0);
+ KUNIT_EXPECT_EQ(test, __member_size(empty->array), 0);
+
+ /* If __counted_by is not being used, array size will have the on-stack size. */
+ if (!IS_ENABLED(CONFIG_CC_HAS_COUNTED_BY))
+ array_size_override = 2 * sizeof(s16);
+
+ KUNIT_EXPECT_EQ(test, sizeof(*two_but_zero), sizeof(struct foo));
+ KUNIT_EXPECT_EQ(test, __struct_size(two_but_zero), sizeof(struct foo) + 2 * sizeof(s16));
+ KUNIT_EXPECT_EQ(test, __member_size(two_but_zero), sizeof(struct foo) + 2 * sizeof(s16));
+ KUNIT_EXPECT_EQ(test, __struct_size(two_but_zero->array), array_size_override);
+ KUNIT_EXPECT_EQ(test, __member_size(two_but_zero->array), array_size_override);
}
static struct kunit_case overflow_test_cases[] = {
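
The expanded expectations spell out the size identities behind these helpers: a struct ending in a flexible array of N s16 elements occupies sizeof(struct) + N * sizeof(s16), and only when the compiler supports __counted_by does the array's own __member_size() collapse to the annotated count (which DEFINE_RAW_FLEX leaves at zero). The allocation-side counterpart of that arithmetic is struct_size() from <linux/overflow.h>; a sketch with a hypothetical struct, not the test's struct foo/bar:

#include <linux/overflow.h>
#include <linux/slab.h>

struct sample {
	int count;
	short array[] __counted_by(count);
};

static struct sample *sample_alloc(int n)
{
	/* sizeof(*s) + n * sizeof(s->array[0]), with overflow checking. */
	struct sample *s = kzalloc(struct_size(s, array, n), GFP_KERNEL);

	if (s)
		s->count = n;	/* set the bound before touching array[] */
	return s;
}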
diff --git a/lib/test_printf.c b/lib/tests/printf_kunit.c
index 59dbe4f9a4cb..2c9f6170bacd 100644
--- a/lib/test_printf.c
+++ b/lib/tests/printf_kunit.c
@@ -3,9 +3,7 @@
* Test cases for printf facility.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/init.h>
+#include <kunit/test.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/printk.h>
@@ -25,8 +23,6 @@
#include <linux/property.h>
-#include "../tools/testing/selftests/kselftest_module.h"
-
#define BUF_SIZE 256
#define PAD_SIZE 16
#define FILL_CHAR '$'
@@ -37,14 +33,14 @@
block \
__diag_pop();
-KSTM_MODULE_GLOBALS();
+static unsigned int total_tests;
-static char *test_buffer __initdata;
-static char *alloced_buffer __initdata;
+static char *test_buffer;
+static char *alloced_buffer;
-static int __printf(4, 0) __init
-do_test(int bufsize, const char *expect, int elen,
- const char *fmt, va_list ap)
+static void __printf(7, 0)
+do_test(struct kunit *kunittest, const char *file, const int line, int bufsize, const char *expect,
+ int elen, const char *fmt, va_list ap)
{
va_list aq;
int ret, written;
@@ -57,62 +53,70 @@ do_test(int bufsize, const char *expect, int elen,
va_end(aq);
if (ret != elen) {
- pr_warn("vsnprintf(buf, %d, \"%s\", ...) returned %d, expected %d\n",
- bufsize, fmt, ret, elen);
- return 1;
+ KUNIT_FAIL(kunittest,
+ "%s:%d: vsnprintf(buf, %d, \"%s\", ...) returned %d, expected %d\n",
+ file, line, bufsize, fmt, ret, elen);
+ return;
}
if (memchr_inv(alloced_buffer, FILL_CHAR, PAD_SIZE)) {
- pr_warn("vsnprintf(buf, %d, \"%s\", ...) wrote before buffer\n", bufsize, fmt);
- return 1;
+ KUNIT_FAIL(kunittest,
+ "%s:%d: vsnprintf(buf, %d, \"%s\", ...) wrote before buffer\n",
+ file, line, bufsize, fmt);
+ return;
}
if (!bufsize) {
if (memchr_inv(test_buffer, FILL_CHAR, BUF_SIZE + PAD_SIZE)) {
- pr_warn("vsnprintf(buf, 0, \"%s\", ...) wrote to buffer\n",
- fmt);
- return 1;
+ KUNIT_FAIL(kunittest,
+ "%s:%d: vsnprintf(buf, 0, \"%s\", ...) wrote to buffer\n",
+ file, line, fmt);
}
- return 0;
+ return;
}
written = min(bufsize-1, elen);
if (test_buffer[written]) {
- pr_warn("vsnprintf(buf, %d, \"%s\", ...) did not nul-terminate buffer\n",
- bufsize, fmt);
- return 1;
+ KUNIT_FAIL(kunittest,
+ "%s:%d: vsnprintf(buf, %d, \"%s\", ...) did not nul-terminate buffer\n",
+ file, line, bufsize, fmt);
+ return;
}
if (memchr_inv(test_buffer + written + 1, FILL_CHAR, bufsize - (written + 1))) {
- pr_warn("vsnprintf(buf, %d, \"%s\", ...) wrote beyond the nul-terminator\n",
- bufsize, fmt);
- return 1;
+ KUNIT_FAIL(kunittest,
+ "%s:%d: vsnprintf(buf, %d, \"%s\", ...) wrote beyond the nul-terminator\n",
+ file, line, bufsize, fmt);
+ return;
}
if (memchr_inv(test_buffer + bufsize, FILL_CHAR, BUF_SIZE + PAD_SIZE - bufsize)) {
- pr_warn("vsnprintf(buf, %d, \"%s\", ...) wrote beyond buffer\n", bufsize, fmt);
- return 1;
+ KUNIT_FAIL(kunittest,
+ "%s:%d: vsnprintf(buf, %d, \"%s\", ...) wrote beyond buffer\n",
+ file, line, bufsize, fmt);
+ return;
}
if (memcmp(test_buffer, expect, written)) {
- pr_warn("vsnprintf(buf, %d, \"%s\", ...) wrote '%s', expected '%.*s'\n",
- bufsize, fmt, test_buffer, written, expect);
- return 1;
+ KUNIT_FAIL(kunittest,
+ "%s:%d: vsnprintf(buf, %d, \"%s\", ...) wrote '%s', expected '%.*s'\n",
+ file, line, bufsize, fmt, test_buffer, written, expect);
+ return;
}
- return 0;
}
-static void __printf(3, 4) __init
-__test(const char *expect, int elen, const char *fmt, ...)
+static void __printf(6, 7)
+__test(struct kunit *kunittest, const char *file, const int line, const char *expect, int elen,
+ const char *fmt, ...)
{
va_list ap;
int rand;
char *p;
if (elen >= BUF_SIZE) {
- pr_err("error in test suite: expected output length %d too long. Format was '%s'.\n",
- elen, fmt);
- failed_tests++;
+ KUNIT_FAIL(kunittest,
+ "%s:%d: error in test suite: expected length (%d) >= BUF_SIZE (%d). fmt=\"%s\"\n",
+ file, line, elen, BUF_SIZE, fmt);
return;
}
@@ -124,19 +128,19 @@ __test(const char *expect, int elen, const char *fmt, ...)
* enough and 0), and then we also test that kvasprintf would
* be able to print it as expected.
*/
- failed_tests += do_test(BUF_SIZE, expect, elen, fmt, ap);
+ do_test(kunittest, file, line, BUF_SIZE, expect, elen, fmt, ap);
rand = get_random_u32_inclusive(1, elen + 1);
/* Since elen < BUF_SIZE, we have 1 <= rand <= BUF_SIZE. */
- failed_tests += do_test(rand, expect, elen, fmt, ap);
- failed_tests += do_test(0, expect, elen, fmt, ap);
+ do_test(kunittest, file, line, rand, expect, elen, fmt, ap);
+ do_test(kunittest, file, line, 0, expect, elen, fmt, ap);
p = kvasprintf(GFP_KERNEL, fmt, ap);
if (p) {
total_tests++;
if (memcmp(p, expect, elen+1)) {
- pr_warn("kvasprintf(..., \"%s\", ...) returned '%s', expected '%s'\n",
- fmt, p, expect);
- failed_tests++;
+ KUNIT_FAIL(kunittest,
+ "%s:%d: kvasprintf(..., \"%s\", ...) returned '%s', expected '%s'\n",
+ file, line, fmt, p, expect);
}
kfree(p);
}
@@ -144,10 +148,10 @@ __test(const char *expect, int elen, const char *fmt, ...)
}
#define test(expect, fmt, ...) \
- __test(expect, strlen(expect), fmt, ##__VA_ARGS__)
+ __test(kunittest, __FILE__, __LINE__, expect, strlen(expect), fmt, ##__VA_ARGS__)
-static void __init
-test_basic(void)
+static void
+test_basic(struct kunit *kunittest)
{
/* Work around annoying "warning: zero-length gnu_printf format string". */
char nul = '\0';
@@ -155,11 +159,11 @@ test_basic(void)
test("", &nul);
test("100%", "100%%");
test("xxx%yyy", "xxx%cyyy", '%');
- __test("xxx\0yyy", 7, "xxx%cyyy", '\0');
+ __test(kunittest, __FILE__, __LINE__, "xxx\0yyy", 7, "xxx%cyyy", '\0');
}
-static void __init
-test_number(void)
+static void
+test_number(struct kunit *kunittest)
{
test("0x1234abcd ", "%#-12x", 0x1234abcd);
test(" 0x1234abcd", "%#12x", 0x1234abcd);
@@ -180,8 +184,8 @@ test_number(void)
test("00|0|0|0|0", "%.2d|%.1d|%.0d|%.*d|%1.0d", 0, 0, 0, 0, 0, 0);
}
-static void __init
-test_string(void)
+static void
+test_string(struct kunit *kunittest)
{
test("", "%s%.0s", "", "123");
test("ABCD|abc|123", "%s|%.3s|%.*s", "ABCD", "abcdef", 3, "123456");
@@ -218,29 +222,6 @@ test_string(void)
#define ZEROS "00000000" /* hex 32 zero bits */
#define ONES "ffffffff" /* hex 32 one bits */
-static int __init
-plain_format(void)
-{
- char buf[PLAIN_BUF_SIZE];
- int nchars;
-
- nchars = snprintf(buf, PLAIN_BUF_SIZE, "%p", PTR);
-
- if (nchars != PTR_WIDTH)
- return -1;
-
- if (strncmp(buf, PTR_VAL_NO_CRNG, PTR_WIDTH) == 0) {
- pr_warn("crng possibly not yet initialized. plain 'p' buffer contains \"%s\"",
- PTR_VAL_NO_CRNG);
- return 0;
- }
-
- if (strncmp(buf, ZEROS, strlen(ZEROS)) != 0)
- return -1;
-
- return 0;
-}
-
#else
#define PTR_WIDTH 8
@@ -250,92 +231,47 @@ plain_format(void)
#define ZEROS ""
#define ONES ""
-static int __init
-plain_format(void)
-{
- /* Format is implicitly tested for 32 bit machines by plain_hash() */
- return 0;
-}
-
#endif /* BITS_PER_LONG == 64 */
-static int __init
-plain_hash_to_buffer(const void *p, char *buf, size_t len)
+static void
+plain_hash_to_buffer(struct kunit *kunittest, const void *p, char *buf, size_t len)
{
- int nchars;
-
- nchars = snprintf(buf, len, "%p", p);
-
- if (nchars != PTR_WIDTH)
- return -1;
+ KUNIT_ASSERT_EQ(kunittest, snprintf(buf, len, "%p", p), PTR_WIDTH);
if (strncmp(buf, PTR_VAL_NO_CRNG, PTR_WIDTH) == 0) {
- pr_warn("crng possibly not yet initialized. plain 'p' buffer contains \"%s\"",
- PTR_VAL_NO_CRNG);
- return 0;
+ kunit_skip(kunittest,
+ "crng possibly not yet initialized. plain 'p' buffer contains \"%s\"\n",
+ PTR_VAL_NO_CRNG);
}
-
- return 0;
}
-static int __init
-plain_hash(void)
+static void
+hash_pointer(struct kunit *kunittest)
{
- char buf[PLAIN_BUF_SIZE];
- int ret;
-
- ret = plain_hash_to_buffer(PTR, buf, PLAIN_BUF_SIZE);
- if (ret)
- return ret;
-
- if (strncmp(buf, PTR_STR, PTR_WIDTH) == 0)
- return -1;
-
- return 0;
-}
+ if (no_hash_pointers)
+ kunit_skip(kunittest, "hash pointers disabled");
-/*
- * We can't use test() to test %p because we don't know what output to expect
- * after an address is hashed.
- */
-static void __init
-plain(void)
-{
- int err;
+ char buf[PLAIN_BUF_SIZE];
- if (no_hash_pointers) {
- pr_warn("skipping plain 'p' tests");
- skipped_tests += 2;
- return;
- }
+ plain_hash_to_buffer(kunittest, PTR, buf, PLAIN_BUF_SIZE);
- err = plain_hash();
- if (err) {
- pr_warn("plain 'p' does not appear to be hashed\n");
- failed_tests++;
- return;
- }
+ /*
+	 * The hash of %p is unpredictable, so test() cannot be used.
+ *
+ * Instead verify that the first 32 bits are zeros on a 64-bit system
+ * and that the non-hashed value is not printed.
+ */
- err = plain_format();
- if (err) {
- pr_warn("hashing plain 'p' has unexpected format\n");
- failed_tests++;
- }
+ KUNIT_EXPECT_MEMEQ(kunittest, buf, ZEROS, strlen(ZEROS));
+ KUNIT_EXPECT_MEMNEQ(kunittest, buf, PTR_STR, PTR_WIDTH);
}
-static void __init
-test_hashed(const char *fmt, const void *p)
+static void
+test_hashed(struct kunit *kunittest, const char *fmt, const void *p)
{
char buf[PLAIN_BUF_SIZE];
- int ret;
- /*
- * No need to increase failed test counter since this is assumed
- * to be called after plain().
- */
- ret = plain_hash_to_buffer(p, buf, PLAIN_BUF_SIZE);
- if (ret)
- return;
+ plain_hash_to_buffer(kunittest, p, buf, PLAIN_BUF_SIZE);
test(buf, fmt, p);
}
@@ -343,8 +279,8 @@ test_hashed(const char *fmt, const void *p)
/*
* NULL pointers aren't hashed.
*/
-static void __init
-null_pointer(void)
+static void
+null_pointer(struct kunit *kunittest)
{
test(ZEROS "00000000", "%p", NULL);
test(ZEROS "00000000", "%px", NULL);
@@ -354,8 +290,8 @@ null_pointer(void)
/*
* Error pointers aren't hashed.
*/
-static void __init
-error_pointer(void)
+static void
+error_pointer(struct kunit *kunittest)
{
test(ONES "fffffff5", "%p", ERR_PTR(-11));
test(ONES "fffffff5", "%px", ERR_PTR(-11));
@@ -364,27 +300,27 @@ error_pointer(void)
#define PTR_INVALID ((void *)0x000000ab)
-static void __init
-invalid_pointer(void)
+static void
+invalid_pointer(struct kunit *kunittest)
{
- test_hashed("%p", PTR_INVALID);
+ test_hashed(kunittest, "%p", PTR_INVALID);
test(ZEROS "000000ab", "%px", PTR_INVALID);
test("(efault)", "%pE", PTR_INVALID);
}
-static void __init
-symbol_ptr(void)
+static void
+symbol_ptr(struct kunit *kunittest)
{
}
-static void __init
-kernel_ptr(void)
+static void
+kernel_ptr(struct kunit *kunittest)
{
/* We can't test this without access to kptr_restrict. */
}
-static void __init
-struct_resource(void)
+static void
+struct_resource(struct kunit *kunittest)
{
struct resource test_resource = {
.start = 0xc0ffee00,
@@ -432,8 +368,8 @@ struct_resource(void)
"%pR", &test_resource);
}
-static void __init
-struct_range(void)
+static void
+struct_range(struct kunit *kunittest)
{
struct range test_range = DEFINE_RANGE(0xc0ffee00ba5eba11,
0xc0ffee00ba5eba11);
@@ -448,18 +384,18 @@ struct_range(void)
"%pra", &test_range);
}
-static void __init
-addr(void)
+static void
+addr(struct kunit *kunittest)
{
}
-static void __init
-escaped_str(void)
+static void
+escaped_str(struct kunit *kunittest)
{
}
-static void __init
-hex_string(void)
+static void
+hex_string(struct kunit *kunittest)
{
const char buf[3] = {0xc0, 0xff, 0xee};
@@ -469,8 +405,8 @@ hex_string(void)
"%*ph|%*phC|%*phD|%*phN", 3, buf, 3, buf, 3, buf, 3, buf);
}
-static void __init
-mac(void)
+static void
+mac(struct kunit *kunittest)
{
const u8 addr[6] = {0x2d, 0x48, 0xd6, 0xfc, 0x7a, 0x05};
@@ -481,8 +417,8 @@ mac(void)
test("057afcd6482d", "%pmR", addr);
}
-static void __init
-ip4(void)
+static void
+ip4(struct kunit *kunittest)
{
struct sockaddr_in sa;
@@ -496,20 +432,13 @@ ip4(void)
test("001.002.003.004:12345|1.2.3.4:12345", "%piSp|%pISp", &sa, &sa);
}
-static void __init
-ip6(void)
+static void
+ip6(struct kunit *kunittest)
{
}
-static void __init
-ip(void)
-{
- ip4();
- ip6();
-}
-
-static void __init
-uuid(void)
+static void
+uuid(struct kunit *kunittest)
{
const char uuid[16] = {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf};
@@ -520,7 +449,7 @@ uuid(void)
test("03020100-0504-0706-0809-0A0B0C0D0E0F", "%pUL", uuid);
}
-static struct dentry test_dentry[4] __initdata = {
+static struct dentry test_dentry[4] = {
{ .d_parent = &test_dentry[0],
.d_name = QSTR_INIT(test_dentry[0].d_iname, 3),
.d_iname = "foo" },
@@ -535,8 +464,8 @@ static struct dentry test_dentry[4] __initdata = {
.d_iname = "romeo" },
};
-static void __init
-dentry(void)
+static void
+dentry(struct kunit *kunittest)
{
test("foo", "%pd", &test_dentry[0]);
test("foo", "%pd2", &test_dentry[0]);
@@ -556,13 +485,13 @@ dentry(void)
test(" bravo/alfa| bravo/alfa", "%12pd2|%*pd2", &test_dentry[2], 12, &test_dentry[2]);
}
-static void __init
-struct_va_format(void)
+static void
+struct_va_format(struct kunit *kunittest)
{
}
-static void __init
-time_and_date(void)
+static void
+time_and_date(struct kunit *kunittest)
{
/* 1543210543 */
const struct rtc_time tm = {
@@ -595,13 +524,13 @@ time_and_date(void)
test("15:32:23|0119-00-04", "%ptTtrs|%ptTdrs", &t, &t);
}
-static void __init
-struct_clk(void)
+static void
+struct_clk(struct kunit *kunittest)
{
}
-static void __init
-large_bitmap(void)
+static void
+large_bitmap(struct kunit *kunittest)
{
const int nbits = 1 << 16;
unsigned long *bits = bitmap_zalloc(nbits, GFP_KERNEL);
@@ -614,8 +543,8 @@ large_bitmap(void)
bitmap_free(bits);
}
-static void __init
-bitmap(void)
+static void
+bitmap(struct kunit *kunittest)
{
DECLARE_BITMAP(bits, 20);
const int primes[] = {2,3,5,7,11,13,17,19};
@@ -634,11 +563,11 @@ bitmap(void)
test("fffff|fffff", "%20pb|%*pb", bits, 20, bits);
test("0-19|0-19", "%20pbl|%*pbl", bits, 20, bits);
- large_bitmap();
+ large_bitmap(kunittest);
}
-static void __init
-netdev_features(void)
+static void
+netdev_features(struct kunit *kunittest)
{
}
@@ -663,9 +592,9 @@ static const struct page_flags_test pft[] = {
"%#x", "kasantag"},
};
-static void __init
-page_flags_test(int section, int node, int zone, int last_cpupid,
- int kasan_tag, unsigned long flags, const char *name,
+static void
+page_flags_test(struct kunit *kunittest, int section, int node, int zone,
+ int last_cpupid, int kasan_tag, unsigned long flags, const char *name,
char *cmp_buf)
{
unsigned long values[] = {section, node, zone, last_cpupid, kasan_tag};
@@ -701,26 +630,25 @@ page_flags_test(int section, int node, int zone, int last_cpupid,
test(cmp_buf, "%pGp", &flags);
}
-static void __init
-flags(void)
+static void
+flags(struct kunit *kunittest)
{
unsigned long flags;
char *cmp_buffer;
gfp_t gfp;
- cmp_buffer = kmalloc(BUF_SIZE, GFP_KERNEL);
- if (!cmp_buffer)
- return;
+ cmp_buffer = kunit_kmalloc(kunittest, BUF_SIZE, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(kunittest, cmp_buffer);
flags = 0;
- page_flags_test(0, 0, 0, 0, 0, flags, "", cmp_buffer);
+ page_flags_test(kunittest, 0, 0, 0, 0, 0, flags, "", cmp_buffer);
flags = 1UL << NR_PAGEFLAGS;
- page_flags_test(0, 0, 0, 0, 0, flags, "", cmp_buffer);
+ page_flags_test(kunittest, 0, 0, 0, 0, 0, flags, "", cmp_buffer);
flags |= 1UL << PG_uptodate | 1UL << PG_dirty | 1UL << PG_lru
| 1UL << PG_active | 1UL << PG_swapbacked;
- page_flags_test(1, 1, 1, 0x1fffff, 1, flags,
+ page_flags_test(kunittest, 1, 1, 1, 0x1fffff, 1, flags,
"uptodate|dirty|lru|active|swapbacked",
cmp_buffer);
@@ -745,11 +673,9 @@ flags(void)
(unsigned long) gfp);
gfp |= __GFP_HIGH;
test(cmp_buffer, "%pGg", &gfp);
-
- kfree(cmp_buffer);
}
-static void __init fwnode_pointer(void)
+static void fwnode_pointer(struct kunit *kunittest)
{
const struct software_node first = { .name = "first" };
const struct software_node second = { .name = "second", .parent = &first };
@@ -763,8 +689,7 @@ static void __init fwnode_pointer(void)
rval = software_node_register_node_group(group);
if (rval) {
- pr_warn("cannot register softnodes; rval %d\n", rval);
- return;
+ kunit_skip(kunittest, "cannot register softnodes; rval %d\n", rval);
}
test(full_name_second, "%pfw", software_node_fwnode(&second));
@@ -776,7 +701,7 @@ static void __init fwnode_pointer(void)
software_node_unregister_node_group(group);
}
-static void __init fourcc_pointer(void)
+static void fourcc_pointer(struct kunit *kunittest)
{
struct {
u32 code;
@@ -793,14 +718,14 @@ static void __init fourcc_pointer(void)
test(try[i].str, "%p4cc", &try[i].code);
}
-static void __init
-errptr(void)
+static void
+errptr(struct kunit *kunittest)
{
test("-1234", "%pe", ERR_PTR(-1234));
/* Check that %pe with a non-ERR_PTR gets treated as ordinary %p. */
BUILD_BUG_ON(IS_ERR(PTR));
- test_hashed("%pe", PTR);
+ test_hashed(kunittest, "%pe", PTR);
#ifdef CONFIG_SYMBOLIC_ERRNAME
test("(-ENOTSOCK)", "(%pe)", ERR_PTR(-ENOTSOCK));
@@ -813,51 +738,66 @@ errptr(void)
#endif
}
-static void __init
-test_pointer(void)
-{
- plain();
- null_pointer();
- error_pointer();
- invalid_pointer();
- symbol_ptr();
- kernel_ptr();
- struct_resource();
- struct_range();
- addr();
- escaped_str();
- hex_string();
- mac();
- ip();
- uuid();
- dentry();
- struct_va_format();
- time_and_date();
- struct_clk();
- bitmap();
- netdev_features();
- flags();
- errptr();
- fwnode_pointer();
- fourcc_pointer();
-}
-
-static void __init selftest(void)
+static int printf_suite_init(struct kunit_suite *suite)
{
+ total_tests = 0;
+
alloced_buffer = kmalloc(BUF_SIZE + 2*PAD_SIZE, GFP_KERNEL);
if (!alloced_buffer)
- return;
+ return -ENOMEM;
test_buffer = alloced_buffer + PAD_SIZE;
- test_basic();
- test_number();
- test_string();
- test_pointer();
+ return 0;
+}
+static void printf_suite_exit(struct kunit_suite *suite)
+{
kfree(alloced_buffer);
-}
-KSTM_MODULE_LOADERS(test_printf);
+ kunit_info(suite, "ran %u tests\n", total_tests);
+}
+
+static struct kunit_case printf_test_cases[] = {
+ KUNIT_CASE(test_basic),
+ KUNIT_CASE(test_number),
+ KUNIT_CASE(test_string),
+ KUNIT_CASE(hash_pointer),
+ KUNIT_CASE(null_pointer),
+ KUNIT_CASE(error_pointer),
+ KUNIT_CASE(invalid_pointer),
+ KUNIT_CASE(symbol_ptr),
+ KUNIT_CASE(kernel_ptr),
+ KUNIT_CASE(struct_resource),
+ KUNIT_CASE(struct_range),
+ KUNIT_CASE(addr),
+ KUNIT_CASE(escaped_str),
+ KUNIT_CASE(hex_string),
+ KUNIT_CASE(mac),
+ KUNIT_CASE(ip4),
+ KUNIT_CASE(ip6),
+ KUNIT_CASE(uuid),
+ KUNIT_CASE(dentry),
+ KUNIT_CASE(struct_va_format),
+ KUNIT_CASE(time_and_date),
+ KUNIT_CASE(struct_clk),
+ KUNIT_CASE(bitmap),
+ KUNIT_CASE(netdev_features),
+ KUNIT_CASE(flags),
+ KUNIT_CASE(errptr),
+ KUNIT_CASE(fwnode_pointer),
+ KUNIT_CASE(fourcc_pointer),
+ {}
+};
+
+static struct kunit_suite printf_test_suite = {
+ .name = "printf",
+ .suite_init = printf_suite_init,
+ .suite_exit = printf_suite_exit,
+ .test_cases = printf_test_cases,
+};
+
+kunit_test_suite(printf_test_suite);
+
MODULE_AUTHOR("Rasmus Villemoes <linux@rasmusvillemoes.dk>");
MODULE_DESCRIPTION("Test cases for printf facility");
MODULE_LICENSE("GPL");
diff --git a/lib/test_scanf.c b/lib/tests/scanf_kunit.c
index 44f8508c9d88..e9a36ed80575 100644
--- a/lib/test_scanf.c
+++ b/lib/tests/scanf_kunit.c
@@ -3,152 +3,138 @@
* Test cases for sscanf facility.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
+#include <kunit/test.h>
#include <linux/bitops.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/overflow.h>
-#include <linux/printk.h>
#include <linux/prandom.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include "../tools/testing/selftests/kselftest_module.h"
-
#define BUF_SIZE 1024
-KSTM_MODULE_GLOBALS();
-static char *test_buffer __initdata;
-static char *fmt_buffer __initdata;
-static struct rnd_state rnd_state __initdata;
+static char *test_buffer;
+static char *fmt_buffer;
+static struct rnd_state rnd_state;
-typedef int (*check_fn)(const void *check_data, const char *string,
- const char *fmt, int n_args, va_list ap);
+typedef void (*check_fn)(struct kunit *test, const char *file, const int line,
+ const void *check_data, const char *string, const char *fmt, int n_args,
+ va_list ap);
-static void __scanf(4, 6) __init
-_test(check_fn fn, const void *check_data, const char *string, const char *fmt,
- int n_args, ...)
+static void __scanf(7, 9)
+_test(struct kunit *test, const char *file, const int line, check_fn fn, const void *check_data,
+ const char *string, const char *fmt, int n_args, ...)
{
va_list ap, ap_copy;
int ret;
- total_tests++;
-
va_start(ap, n_args);
va_copy(ap_copy, ap);
ret = vsscanf(string, fmt, ap_copy);
va_end(ap_copy);
if (ret != n_args) {
- pr_warn("vsscanf(\"%s\", \"%s\", ...) returned %d expected %d\n",
- string, fmt, ret, n_args);
- goto fail;
+ KUNIT_FAIL(test, "%s:%d: vsscanf(\"%s\", \"%s\", ...) returned %d expected %d",
+ file, line, string, fmt, ret, n_args);
+ } else {
+ (*fn)(test, file, line, check_data, string, fmt, n_args, ap);
}
- ret = (*fn)(check_data, string, fmt, n_args, ap);
- if (ret)
- goto fail;
-
- va_end(ap);
-
- return;
-
-fail:
- failed_tests++;
va_end(ap);
}
#define _check_numbers_template(arg_fmt, expect, str, fmt, n_args, ap) \
do { \
- pr_debug("\"%s\", \"%s\" ->\n", str, fmt); \
for (; n_args > 0; n_args--, expect++) { \
typeof(*expect) got = *va_arg(ap, typeof(expect)); \
- pr_debug("\t" arg_fmt "\n", got); \
if (got != *expect) { \
- pr_warn("vsscanf(\"%s\", \"%s\", ...) expected " arg_fmt " got " arg_fmt "\n", \
- str, fmt, *expect, got); \
- return 1; \
+ KUNIT_FAIL(test, \
+ "%s:%d: vsscanf(\"%s\", \"%s\", ...) expected " arg_fmt " got " arg_fmt, \
+ file, line, str, fmt, *expect, got); \
+ return; \
} \
} \
- return 0; \
} while (0)
-static int __init check_ull(const void *check_data, const char *string,
- const char *fmt, int n_args, va_list ap)
+static void check_ull(struct kunit *test, const char *file, const int line, const void *check_data,
+ const char *string, const char *fmt, int n_args, va_list ap)
{
const unsigned long long *pval = check_data;
_check_numbers_template("%llu", pval, string, fmt, n_args, ap);
}
-static int __init check_ll(const void *check_data, const char *string,
- const char *fmt, int n_args, va_list ap)
+static void check_ll(struct kunit *test, const char *file, const int line, const void *check_data,
+ const char *string, const char *fmt, int n_args, va_list ap)
{
const long long *pval = check_data;
_check_numbers_template("%lld", pval, string, fmt, n_args, ap);
}
-static int __init check_ulong(const void *check_data, const char *string,
- const char *fmt, int n_args, va_list ap)
+static void check_ulong(struct kunit *test, const char *file, const int line,
+ const void *check_data, const char *string, const char *fmt, int n_args,
+ va_list ap)
{
const unsigned long *pval = check_data;
_check_numbers_template("%lu", pval, string, fmt, n_args, ap);
}
-static int __init check_long(const void *check_data, const char *string,
- const char *fmt, int n_args, va_list ap)
+static void check_long(struct kunit *test, const char *file, const int line, const void *check_data,
+ const char *string, const char *fmt, int n_args, va_list ap)
{
const long *pval = check_data;
_check_numbers_template("%ld", pval, string, fmt, n_args, ap);
}
-static int __init check_uint(const void *check_data, const char *string,
- const char *fmt, int n_args, va_list ap)
+static void check_uint(struct kunit *test, const char *file, const int line, const void *check_data,
+ const char *string, const char *fmt, int n_args, va_list ap)
{
const unsigned int *pval = check_data;
_check_numbers_template("%u", pval, string, fmt, n_args, ap);
}
-static int __init check_int(const void *check_data, const char *string,
- const char *fmt, int n_args, va_list ap)
+static void check_int(struct kunit *test, const char *file, const int line, const void *check_data,
+ const char *string, const char *fmt, int n_args, va_list ap)
{
const int *pval = check_data;
_check_numbers_template("%d", pval, string, fmt, n_args, ap);
}
-static int __init check_ushort(const void *check_data, const char *string,
- const char *fmt, int n_args, va_list ap)
+static void check_ushort(struct kunit *test, const char *file, const int line,
+ const void *check_data, const char *string, const char *fmt, int n_args,
+ va_list ap)
{
const unsigned short *pval = check_data;
_check_numbers_template("%hu", pval, string, fmt, n_args, ap);
}
-static int __init check_short(const void *check_data, const char *string,
- const char *fmt, int n_args, va_list ap)
+static void check_short(struct kunit *test, const char *file, const int line,
+ const void *check_data, const char *string, const char *fmt, int n_args,
+ va_list ap)
{
const short *pval = check_data;
_check_numbers_template("%hd", pval, string, fmt, n_args, ap);
}
-static int __init check_uchar(const void *check_data, const char *string,
- const char *fmt, int n_args, va_list ap)
+static void check_uchar(struct kunit *test, const char *file, const int line,
+ const void *check_data, const char *string, const char *fmt, int n_args,
+ va_list ap)
{
const unsigned char *pval = check_data;
_check_numbers_template("%hhu", pval, string, fmt, n_args, ap);
}
-static int __init check_char(const void *check_data, const char *string,
- const char *fmt, int n_args, va_list ap)
+static void check_char(struct kunit *test, const char *file, const int line, const void *check_data,
+ const char *string, const char *fmt, int n_args, va_list ap)
{
const signed char *pval = check_data;
@@ -156,7 +142,7 @@ static int __init check_char(const void *check_data, const char *string,
}
/* Selection of interesting numbers to test, copied from test-kstrtox.c */
-static const unsigned long long numbers[] __initconst = {
+static const unsigned long long numbers[] = {
0x0ULL,
0x1ULL,
0x7fULL,
@@ -196,7 +182,7 @@ do { \
T result = ~expect_val; /* should be overwritten */ \
\
snprintf(test_buffer, BUF_SIZE, gen_fmt, expect_val); \
- _test(fn, &expect_val, test_buffer, "%" scan_fmt, 1, &result); \
+ _test(test, __FILE__, __LINE__, fn, &expect_val, test_buffer, "%" scan_fmt, 1, &result);\
} while (0)
#define simple_numbers_loop(T, gen_fmt, scan_fmt, fn) \
@@ -214,7 +200,7 @@ do { \
} \
} while (0)
-static void __init numbers_simple(void)
+static void numbers_simple(struct kunit *test)
{
simple_numbers_loop(unsigned long long, "%llu", "llu", check_ull);
simple_numbers_loop(long long, "%lld", "lld", check_ll);
@@ -267,14 +253,14 @@ static void __init numbers_simple(void)
* the raw prandom*() functions (Not mathematically rigorous!!).
 * Variability of length and value is more important than perfect randomness.
*/
-static u32 __init next_test_random(u32 max_bits)
+static u32 next_test_random(u32 max_bits)
{
u32 n_bits = hweight32(prandom_u32_state(&rnd_state)) % (max_bits + 1);
return prandom_u32_state(&rnd_state) & GENMASK(n_bits, 0);
}
-static unsigned long long __init next_test_random_ull(void)
+static unsigned long long next_test_random_ull(void)
{
u32 rand1 = prandom_u32_state(&rnd_state);
u32 n_bits = (hweight32(rand1) * 3) % 64;
@@ -311,7 +297,7 @@ do { \
* updating buf_pos and returning the number of characters appended.
* On error buf_pos is not changed and return value is 0.
*/
-static int __init __printf(4, 5)
+static int __printf(4, 5)
append_fmt(char *buf, int *buf_pos, int buf_len, const char *val_fmt, ...)
{
va_list ap;
@@ -333,7 +319,7 @@ append_fmt(char *buf, int *buf_pos, int buf_len, const char *val_fmt, ...)
* Convenience function to append the field delimiter string
* to both the value string and format string buffers.
*/
-static void __init append_delim(char *str_buf, int *str_buf_pos, int str_buf_len,
+static void append_delim(char *str_buf, int *str_buf_pos, int str_buf_len,
char *fmt_buf, int *fmt_buf_pos, int fmt_buf_len,
const char *delim_str)
{
@@ -344,7 +330,7 @@ static void __init append_delim(char *str_buf, int *str_buf_pos, int str_buf_len
#define test_array_8(fn, check_data, string, fmt, arr) \
do { \
BUILD_BUG_ON(ARRAY_SIZE(arr) != 8); \
- _test(fn, check_data, string, fmt, 8, \
+ _test(test, __FILE__, __LINE__, fn, check_data, string, fmt, 8, \
&(arr)[0], &(arr)[1], &(arr)[2], &(arr)[3], \
&(arr)[4], &(arr)[5], &(arr)[6], &(arr)[7]); \
} while (0)
@@ -398,7 +384,7 @@ do { \
test_array_8(fn, expect, test_buffer, fmt_buffer, result); \
} while (0)
-static void __init numbers_list_ll(const char *delim)
+static void numbers_list_ll(struct kunit *test, const char *delim)
{
numbers_list_8(unsigned long long, "%llu", delim, "llu", check_ull);
numbers_list_8(long long, "%lld", delim, "lld", check_ll);
@@ -408,7 +394,7 @@ static void __init numbers_list_ll(const char *delim)
numbers_list_8(long long, "0x%llx", delim, "lli", check_ll);
}
-static void __init numbers_list_l(const char *delim)
+static void numbers_list_l(struct kunit *test, const char *delim)
{
numbers_list_8(unsigned long, "%lu", delim, "lu", check_ulong);
numbers_list_8(long, "%ld", delim, "ld", check_long);
@@ -418,7 +404,7 @@ static void __init numbers_list_l(const char *delim)
numbers_list_8(long, "0x%lx", delim, "li", check_long);
}
-static void __init numbers_list_d(const char *delim)
+static void numbers_list_d(struct kunit *test, const char *delim)
{
numbers_list_8(unsigned int, "%u", delim, "u", check_uint);
numbers_list_8(int, "%d", delim, "d", check_int);
@@ -428,7 +414,7 @@ static void __init numbers_list_d(const char *delim)
numbers_list_8(int, "0x%x", delim, "i", check_int);
}
-static void __init numbers_list_h(const char *delim)
+static void numbers_list_h(struct kunit *test, const char *delim)
{
numbers_list_8(unsigned short, "%hu", delim, "hu", check_ushort);
numbers_list_8(short, "%hd", delim, "hd", check_short);
@@ -438,7 +424,7 @@ static void __init numbers_list_h(const char *delim)
numbers_list_8(short, "0x%hx", delim, "hi", check_short);
}
-static void __init numbers_list_hh(const char *delim)
+static void numbers_list_hh(struct kunit *test, const char *delim)
{
numbers_list_8(unsigned char, "%hhu", delim, "hhu", check_uchar);
numbers_list_8(signed char, "%hhd", delim, "hhd", check_char);
@@ -448,16 +434,19 @@ static void __init numbers_list_hh(const char *delim)
numbers_list_8(signed char, "0x%hhx", delim, "hhi", check_char);
}
-static void __init numbers_list(const char *delim)
+static void numbers_list(struct kunit *test)
{
- numbers_list_ll(delim);
- numbers_list_l(delim);
- numbers_list_d(delim);
- numbers_list_h(delim);
- numbers_list_hh(delim);
+ const char * const *param = test->param_value;
+ const char *delim = *param;
+
+ numbers_list_ll(test, delim);
+ numbers_list_l(test, delim);
+ numbers_list_d(test, delim);
+ numbers_list_h(test, delim);
+ numbers_list_hh(test, delim);
}
-static void __init numbers_list_field_width_ll(const char *delim)
+static void numbers_list_field_width_ll(struct kunit *test, const char *delim)
{
numbers_list_fix_width(unsigned long long, "%llu", delim, 20, "llu", check_ull);
numbers_list_fix_width(long long, "%lld", delim, 20, "lld", check_ll);
@@ -467,7 +456,7 @@ static void __init numbers_list_field_width_ll(const char *delim)
numbers_list_fix_width(long long, "0x%llx", delim, 18, "lli", check_ll);
}
-static void __init numbers_list_field_width_l(const char *delim)
+static void numbers_list_field_width_l(struct kunit *test, const char *delim)
{
#if BITS_PER_LONG == 64
numbers_list_fix_width(unsigned long, "%lu", delim, 20, "lu", check_ulong);
@@ -486,7 +475,7 @@ static void __init numbers_list_field_width_l(const char *delim)
#endif
}
-static void __init numbers_list_field_width_d(const char *delim)
+static void numbers_list_field_width_d(struct kunit *test, const char *delim)
{
numbers_list_fix_width(unsigned int, "%u", delim, 10, "u", check_uint);
numbers_list_fix_width(int, "%d", delim, 11, "d", check_int);
@@ -496,7 +485,7 @@ static void __init numbers_list_field_width_d(const char *delim)
numbers_list_fix_width(int, "0x%x", delim, 10, "i", check_int);
}
-static void __init numbers_list_field_width_h(const char *delim)
+static void numbers_list_field_width_h(struct kunit *test, const char *delim)
{
numbers_list_fix_width(unsigned short, "%hu", delim, 5, "hu", check_ushort);
numbers_list_fix_width(short, "%hd", delim, 6, "hd", check_short);
@@ -506,7 +495,7 @@ static void __init numbers_list_field_width_h(const char *delim)
numbers_list_fix_width(short, "0x%hx", delim, 6, "hi", check_short);
}
-static void __init numbers_list_field_width_hh(const char *delim)
+static void numbers_list_field_width_hh(struct kunit *test, const char *delim)
{
numbers_list_fix_width(unsigned char, "%hhu", delim, 3, "hhu", check_uchar);
numbers_list_fix_width(signed char, "%hhd", delim, 4, "hhd", check_char);
@@ -520,16 +509,19 @@ static void __init numbers_list_field_width_hh(const char *delim)
* List of numbers separated by delim. Each field width specifier is the
* maximum possible digits for the given type and base.
*/
-static void __init numbers_list_field_width_typemax(const char *delim)
+static void numbers_list_field_width_typemax(struct kunit *test)
{
- numbers_list_field_width_ll(delim);
- numbers_list_field_width_l(delim);
- numbers_list_field_width_d(delim);
- numbers_list_field_width_h(delim);
- numbers_list_field_width_hh(delim);
+ const char * const *param = test->param_value;
+ const char *delim = *param;
+
+ numbers_list_field_width_ll(test, delim);
+ numbers_list_field_width_l(test, delim);
+ numbers_list_field_width_d(test, delim);
+ numbers_list_field_width_h(test, delim);
+ numbers_list_field_width_hh(test, delim);
}
-static void __init numbers_list_field_width_val_ll(const char *delim)
+static void numbers_list_field_width_val_ll(struct kunit *test, const char *delim)
{
numbers_list_val_width(unsigned long long, "%llu", delim, "llu", check_ull);
numbers_list_val_width(long long, "%lld", delim, "lld", check_ll);
@@ -539,7 +531,7 @@ static void __init numbers_list_field_width_val_ll(const char *delim)
numbers_list_val_width(long long, "0x%llx", delim, "lli", check_ll);
}
-static void __init numbers_list_field_width_val_l(const char *delim)
+static void numbers_list_field_width_val_l(struct kunit *test, const char *delim)
{
numbers_list_val_width(unsigned long, "%lu", delim, "lu", check_ulong);
numbers_list_val_width(long, "%ld", delim, "ld", check_long);
@@ -549,7 +541,7 @@ static void __init numbers_list_field_width_val_l(const char *delim)
numbers_list_val_width(long, "0x%lx", delim, "li", check_long);
}
-static void __init numbers_list_field_width_val_d(const char *delim)
+static void numbers_list_field_width_val_d(struct kunit *test, const char *delim)
{
numbers_list_val_width(unsigned int, "%u", delim, "u", check_uint);
numbers_list_val_width(int, "%d", delim, "d", check_int);
@@ -559,7 +551,7 @@ static void __init numbers_list_field_width_val_d(const char *delim)
numbers_list_val_width(int, "0x%x", delim, "i", check_int);
}
-static void __init numbers_list_field_width_val_h(const char *delim)
+static void numbers_list_field_width_val_h(struct kunit *test, const char *delim)
{
numbers_list_val_width(unsigned short, "%hu", delim, "hu", check_ushort);
numbers_list_val_width(short, "%hd", delim, "hd", check_short);
@@ -569,7 +561,7 @@ static void __init numbers_list_field_width_val_h(const char *delim)
numbers_list_val_width(short, "0x%hx", delim, "hi", check_short);
}
-static void __init numbers_list_field_width_val_hh(const char *delim)
+static void numbers_list_field_width_val_hh(struct kunit *test, const char *delim)
{
numbers_list_val_width(unsigned char, "%hhu", delim, "hhu", check_uchar);
numbers_list_val_width(signed char, "%hhd", delim, "hhd", check_char);
@@ -583,13 +575,16 @@ static void __init numbers_list_field_width_val_hh(const char *delim)
* List of numbers separated by delim. Each field width specifier is the
* exact length of the corresponding value digits in the string being scanned.
*/
-static void __init numbers_list_field_width_val_width(const char *delim)
+static void numbers_list_field_width_val_width(struct kunit *test)
{
- numbers_list_field_width_val_ll(delim);
- numbers_list_field_width_val_l(delim);
- numbers_list_field_width_val_d(delim);
- numbers_list_field_width_val_h(delim);
- numbers_list_field_width_val_hh(delim);
+ const char * const *param = test->param_value;
+ const char *delim = *param;
+
+ numbers_list_field_width_val_ll(test, delim);
+ numbers_list_field_width_val_l(test, delim);
+ numbers_list_field_width_val_d(test, delim);
+ numbers_list_field_width_val_h(test, delim);
+ numbers_list_field_width_val_hh(test, delim);
}
/*
@@ -598,9 +593,14 @@ static void __init numbers_list_field_width_val_width(const char *delim)
* of digits. For example the hex values c0,3,bf01,303 would have a
* string representation of "c03bf01303" and extracted with "%2x%1x%4x%3x".
*/
-static void __init numbers_slice(void)
+static void numbers_slice(struct kunit *test)
{
- numbers_list_field_width_val_width("");
+ const char *delim = "";
+
+ KUNIT_ASSERT_PTR_EQ(test, test->param_value, NULL);
+ test->param_value = &delim;
+
+ numbers_list_field_width_val_width(test);
}
#define test_number_prefix(T, str, scan_fmt, expect0, expect1, n_args, fn) \
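As a quick illustration of the field-width slicing that numbers_slice() exercises, a stand-alone user-space sketch (plain C; the string and widths mirror the example in the comment above, and the program is illustrative rather than part of the kernel test) would be:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int a, b, c, d;
	/*
	 * "c03bf01303" holds the hex values c0, 3, bf01, 303 back to back;
	 * the field widths alone tell sscanf() where each value ends.
	 */
	int n = sscanf("c03bf01303", "%2x%1x%4x%3x", &a, &b, &c, &d);

	assert(n == 4);
	assert(a == 0xc0 && b == 0x3 && c == 0xbf01 && d == 0x303);
	printf("%x %x %x %x\n", a, b, c, d);
	return 0;
}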
@@ -608,14 +608,14 @@ do { \
const T expect[2] = { expect0, expect1 }; \
T result[2] = { (T)~expect[0], (T)~expect[1] }; \
\
- _test(fn, &expect, str, scan_fmt, n_args, &result[0], &result[1]); \
+ _test(test, __FILE__, __LINE__, fn, &expect, str, scan_fmt, n_args, &result[0], &result[1]);\
} while (0)
/*
* Number prefix is >= field width.
* Expected behaviour is derived from testing userland sscanf.
*/
-static void __init numbers_prefix_overflow(void)
+static void numbers_prefix_overflow(struct kunit *test)
{
/*
* Negative decimal with a field of width 1, should quit scanning
@@ -684,25 +684,17 @@ do { \
T got; \
char *endp; \
int len; \
- bool fail = false; \
\
- total_tests++; \
len = snprintf(test_buffer, BUF_SIZE, gen_fmt, expect); \
got = (fn)(test_buffer, &endp, base); \
- pr_debug(#fn "(\"%s\", %d) -> " gen_fmt "\n", test_buffer, base, got); \
if (got != (expect)) { \
- fail = true; \
- pr_warn(#fn "(\"%s\", %d): got " gen_fmt " expected " gen_fmt "\n", \
- test_buffer, base, got, expect); \
+ KUNIT_FAIL(test, #fn "(\"%s\", %d): got " gen_fmt " expected " gen_fmt, \
+ test_buffer, base, got, expect); \
} else if (endp != test_buffer + len) { \
- fail = true; \
- pr_warn(#fn "(\"%s\", %d) startp=0x%px got endp=0x%px expected 0x%px\n", \
- test_buffer, base, test_buffer, \
- test_buffer + len, endp); \
+ KUNIT_FAIL(test, #fn "(\"%s\", %d) startp=0x%px got endp=0x%px expected 0x%px", \
+ test_buffer, base, test_buffer, \
+ test_buffer + len, endp); \
} \
- \
- if (fail) \
- failed_tests++; \
} while (0)
#define test_simple_strtoxx(T, fn, gen_fmt, base) \
@@ -718,7 +710,7 @@ do { \
} \
} while (0)
-static void __init test_simple_strtoull(void)
+static void test_simple_strtoull(struct kunit *test)
{
test_simple_strtoxx(unsigned long long, simple_strtoull, "%llu", 10);
test_simple_strtoxx(unsigned long long, simple_strtoull, "%llu", 0);
@@ -727,7 +719,7 @@ static void __init test_simple_strtoull(void)
test_simple_strtoxx(unsigned long long, simple_strtoull, "0x%llx", 0);
}
-static void __init test_simple_strtoll(void)
+static void test_simple_strtoll(struct kunit *test)
{
test_simple_strtoxx(long long, simple_strtoll, "%lld", 10);
test_simple_strtoxx(long long, simple_strtoll, "%lld", 0);
@@ -736,7 +728,7 @@ static void __init test_simple_strtoll(void)
test_simple_strtoxx(long long, simple_strtoll, "0x%llx", 0);
}
-static void __init test_simple_strtoul(void)
+static void test_simple_strtoul(struct kunit *test)
{
test_simple_strtoxx(unsigned long, simple_strtoul, "%lu", 10);
test_simple_strtoxx(unsigned long, simple_strtoul, "%lu", 0);
@@ -745,7 +737,7 @@ static void __init test_simple_strtoul(void)
test_simple_strtoxx(unsigned long, simple_strtoul, "0x%lx", 0);
}
-static void __init test_simple_strtol(void)
+static void test_simple_strtol(struct kunit *test)
{
test_simple_strtoxx(long, simple_strtol, "%ld", 10);
test_simple_strtoxx(long, simple_strtol, "%ld", 0);
@@ -755,60 +747,69 @@ static void __init test_simple_strtol(void)
}
/* Selection of common delimiters/separators between numbers in a string. */
-static const char * const number_delimiters[] __initconst = {
+static const char * const number_delimiters[] = {
" ", ":", ",", "-", "/",
};
-static void __init test_numbers(void)
+static void number_delimiter_param_desc(const char * const *param,
+ char *desc)
{
- int i;
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "\"%s\"", *param);
+}
- /* String containing only one number. */
- numbers_simple();
+KUNIT_ARRAY_PARAM(number_delimiters, number_delimiters, number_delimiter_param_desc);
+static struct kunit_case scanf_test_cases[] = {
+ KUNIT_CASE(numbers_simple),
/* String with multiple numbers separated by delimiter. */
- for (i = 0; i < ARRAY_SIZE(number_delimiters); i++) {
- numbers_list(number_delimiters[i]);
-
- /* Field width may be longer than actual field digits. */
- numbers_list_field_width_typemax(number_delimiters[i]);
-
- /* Each field width exactly length of actual field digits. */
- numbers_list_field_width_val_width(number_delimiters[i]);
- }
-
+ KUNIT_CASE_PARAM(numbers_list, number_delimiters_gen_params),
+ /* Field width may be longer than actual field digits. */
+ KUNIT_CASE_PARAM(numbers_list_field_width_typemax, number_delimiters_gen_params),
+ /* Each field width exactly length of actual field digits. */
+ KUNIT_CASE_PARAM(numbers_list_field_width_val_width, number_delimiters_gen_params),
/* Slice continuous sequence of digits using field widths. */
- numbers_slice();
-
- numbers_prefix_overflow();
-}
+ KUNIT_CASE(numbers_slice),
+ KUNIT_CASE(numbers_prefix_overflow),
+
+ KUNIT_CASE(test_simple_strtoull),
+ KUNIT_CASE(test_simple_strtoll),
+ KUNIT_CASE(test_simple_strtoul),
+ KUNIT_CASE(test_simple_strtol),
+ {}
+};
-static void __init selftest(void)
+static int scanf_suite_init(struct kunit_suite *suite)
{
test_buffer = kmalloc(BUF_SIZE, GFP_KERNEL);
if (!test_buffer)
- return;
+ return -ENOMEM;
fmt_buffer = kmalloc(BUF_SIZE, GFP_KERNEL);
if (!fmt_buffer) {
kfree(test_buffer);
- return;
+ return -ENOMEM;
}
prandom_seed_state(&rnd_state, 3141592653589793238ULL);
- test_numbers();
-
- test_simple_strtoull();
- test_simple_strtoll();
- test_simple_strtoul();
- test_simple_strtol();
+ return 0;
+}
+static void scanf_suite_exit(struct kunit_suite *suite)
+{
kfree(fmt_buffer);
kfree(test_buffer);
}
-KSTM_MODULE_LOADERS(test_scanf);
+static struct kunit_suite scanf_test_suite = {
+ .name = "scanf",
+ .suite_init = scanf_suite_init,
+ .suite_exit = scanf_suite_exit,
+ .test_cases = scanf_test_cases,
+};
+
+kunit_test_suite(scanf_test_suite);
+
MODULE_AUTHOR("Richard Fitzgerald <rf@opensource.cirrus.com>");
MODULE_DESCRIPTION("Test cases for sscanf facility");
MODULE_LICENSE("GPL v2");
diff --git a/lib/siphash_kunit.c b/lib/tests/siphash_kunit.c
index 26bd4e8dc03e..26bd4e8dc03e 100644
--- a/lib/siphash_kunit.c
+++ b/lib/tests/siphash_kunit.c
diff --git a/lib/slub_kunit.c b/lib/tests/slub_kunit.c
index f11691315c2f..d47c472b0520 100644
--- a/lib/slub_kunit.c
+++ b/lib/tests/slub_kunit.c
@@ -6,6 +6,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
+#include <linux/delay.h>
#include "../mm/slab.h"
static struct kunit_resource resource;
@@ -181,6 +182,63 @@ static void test_kfree_rcu(struct kunit *test)
KUNIT_EXPECT_EQ(test, 0, slab_errors);
}
+struct cache_destroy_work {
+ struct work_struct work;
+ struct kmem_cache *s;
+};
+
+static void cache_destroy_workfn(struct work_struct *w)
+{
+ struct cache_destroy_work *cdw;
+
+ cdw = container_of(w, struct cache_destroy_work, work);
+ kmem_cache_destroy(cdw->s);
+}
+
+#define KMEM_CACHE_DESTROY_NR 10
+
+static void test_kfree_rcu_wq_destroy(struct kunit *test)
+{
+ struct test_kfree_rcu_struct *p;
+ struct cache_destroy_work cdw;
+ struct workqueue_struct *wq;
+ struct kmem_cache *s;
+ unsigned int delay;
+ int i;
+
+ if (IS_BUILTIN(CONFIG_SLUB_KUNIT_TEST))
+ kunit_skip(test, "can't do kfree_rcu() when test is built-in");
+
+ INIT_WORK_ONSTACK(&cdw.work, cache_destroy_workfn);
+ wq = alloc_workqueue("test_kfree_rcu_destroy_wq",
+ WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
+
+ if (!wq)
+ kunit_skip(test, "failed to alloc wq");
+
+ for (i = 0; i < KMEM_CACHE_DESTROY_NR; i++) {
+ s = test_kmem_cache_create("TestSlub_kfree_rcu_wq_destroy",
+ sizeof(struct test_kfree_rcu_struct),
+ SLAB_NO_MERGE);
+
+ if (!s)
+ kunit_skip(test, "failed to create cache");
+
+ delay = get_random_u8();
+ p = kmem_cache_alloc(s, GFP_KERNEL);
+ kfree_rcu(p, rcu);
+
+ cdw.s = s;
+
+ msleep(delay);
+ queue_work(wq, &cdw.work);
+ flush_work(&cdw.work);
+ }
+
+ destroy_workqueue(wq);
+ KUNIT_EXPECT_EQ(test, 0, slab_errors);
+}
+
static void test_leak_destroy(struct kunit *test)
{
struct kmem_cache *s = test_kmem_cache_create("TestSlub_leak_destroy",
@@ -254,6 +312,7 @@ static struct kunit_case test_cases[] = {
KUNIT_CASE(test_clobber_redzone_free),
KUNIT_CASE(test_kmalloc_redzone_access),
KUNIT_CASE(test_kfree_rcu),
+ KUNIT_CASE(test_kfree_rcu_wq_destroy),
KUNIT_CASE(test_leak_destroy),
KUNIT_CASE(test_krealloc_redzone_zeroing),
{}
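The new test leans on the on-stack work item pattern: INIT_WORK_ONSTACK() plus flush_work() before the stack frame disappears. A generic sketch of that pattern outside the test (struct flush_ctx, flush_ctx_workfn() and run_once() are illustrative names, not kernel code):

#include <linux/workqueue.h>
#include <linux/printk.h>

struct flush_ctx {
	struct work_struct work;
	int value;
};

static void flush_ctx_workfn(struct work_struct *w)
{
	struct flush_ctx *ctx = container_of(w, struct flush_ctx, work);

	pr_info("processed value %d\n", ctx->value);
}

static void run_once(struct workqueue_struct *wq)
{
	struct flush_ctx ctx = { .value = 42 };

	INIT_WORK_ONSTACK(&ctx.work, flush_ctx_workfn);
	queue_work(wq, &ctx.work);
	/* The work item lives on the stack: it must finish before we return. */
	flush_work(&ctx.work);
	destroy_work_on_stack(&ctx.work);
}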
diff --git a/lib/stackinit_kunit.c b/lib/tests/stackinit_kunit.c
index 135322592faf..63aa78e6f5c1 100644
--- a/lib/stackinit_kunit.c
+++ b/lib/tests/stackinit_kunit.c
@@ -185,6 +185,15 @@ static bool stackinit_range_contains(char *haystack_start, size_t haystack_size,
INIT_STRUCT_assigned_copy(var_type)
/*
+ * The "did we actually fill the stack?" check value needs
+ * to be neither 0 nor any of the "pattern" bytes. The
+ * pattern bytes are compiler, architecture, and type based,
+ * so we have to pick a value that never appears for those
+ * combinations. Use 0x99 which is not 0xFF, 0xFE, nor 0xAA.
+ */
+#define FILL_BYTE 0x99
+
+/*
* @name: unique string name for the test
* @var_type: type to be tested for zeroing initialization
* @which: is this a SCALAR, STRING, or STRUCT type?
@@ -206,12 +215,12 @@ static noinline void test_ ## name (struct kunit *test) \
ZERO_CLONE_ ## which(zero); \
/* Clear entire check buffer for 0xFF overlap test. */ \
memset(check_buf, 0x00, sizeof(check_buf)); \
- /* Fill stack with 0xFF. */ \
+ /* Fill stack with FILL_BYTE. */ \
ignored = leaf_ ##name((unsigned long)&ignored, 1, \
FETCH_ARG_ ## which(zero)); \
- /* Verify all bytes overwritten with 0xFF. */ \
+ /* Verify all bytes overwritten with FILL_BYTE. */ \
for (sum = 0, i = 0; i < target_size; i++) \
- sum += (check_buf[i] != 0xFF); \
+ sum += (check_buf[i] != FILL_BYTE); \
/* Clear entire check buffer for later bit tests. */ \
memset(check_buf, 0x00, sizeof(check_buf)); \
/* Extract stack-defined variable contents. */ \
@@ -222,7 +231,8 @@ static noinline void test_ ## name (struct kunit *test) \
* possible between the two leaf function calls. \
*/ \
KUNIT_ASSERT_EQ_MSG(test, sum, 0, \
- "leaf fill was not 0xFF!?\n"); \
+ "leaf fill was not 0x%02X!?\n", \
+ FILL_BYTE); \
\
/* Validate that compiler lined up fill and target. */ \
KUNIT_ASSERT_TRUE_MSG(test, \
@@ -234,9 +244,9 @@ static noinline void test_ ## name (struct kunit *test) \
(int)((ssize_t)(uintptr_t)fill_start - \
(ssize_t)(uintptr_t)target_start)); \
\
- /* Look for any bytes still 0xFF in check region. */ \
+ /* Validate check region has no FILL_BYTE bytes. */ \
for (sum = 0, i = 0; i < target_size; i++) \
- sum += (check_buf[i] == 0xFF); \
+ sum += (check_buf[i] == FILL_BYTE); \
\
if (sum != 0 && xfail) \
kunit_skip(test, \
@@ -271,12 +281,12 @@ static noinline int leaf_ ## name(unsigned long sp, bool fill, \
* stack frame of SOME kind... \
*/ \
memset(buf, (char)(sp & 0xff), sizeof(buf)); \
- /* Fill variable with 0xFF. */ \
+ /* Fill variable with FILL_BYTE. */ \
if (fill) { \
fill_start = &var; \
fill_size = sizeof(var); \
memset(fill_start, \
- (char)((sp & 0xff) | forced_mask), \
+ FILL_BYTE & forced_mask, \
fill_size); \
} \
\
@@ -469,7 +479,7 @@ static int noinline __leaf_switch_none(int path, bool fill)
fill_start = &var;
fill_size = sizeof(var);
- memset(fill_start, forced_mask | 0x55, fill_size);
+ memset(fill_start, (forced_mask | 0x55) & FILL_BYTE, fill_size);
}
memcpy(check_buf, target_start, target_size);
break;
@@ -480,7 +490,7 @@ static int noinline __leaf_switch_none(int path, bool fill)
fill_start = &var;
fill_size = sizeof(var);
- memset(fill_start, forced_mask | 0xaa, fill_size);
+ memset(fill_start, (forced_mask | 0xaa) & FILL_BYTE, fill_size);
}
memcpy(check_buf, target_start, target_size);
break;
diff --git a/lib/string_helpers_kunit.c b/lib/tests/string_helpers_kunit.c
index c853046183d2..c853046183d2 100644
--- a/lib/string_helpers_kunit.c
+++ b/lib/tests/string_helpers_kunit.c
diff --git a/lib/string_kunit.c b/lib/tests/string_kunit.c
index c919e3293da6..0ed7448a26d3 100644
--- a/lib/string_kunit.c
+++ b/lib/tests/string_kunit.c
@@ -579,8 +579,8 @@ static void string_test_strtomem(struct kunit *test)
static void string_test_memtostr(struct kunit *test)
{
- char nonstring[7] = { 'a', 'b', 'c', 'd', 'e', 'f', 'g' };
- char nonstring_small[3] = { 'a', 'b', 'c' };
+ char nonstring[7] __nonstring = { 'a', 'b', 'c', 'd', 'e', 'f', 'g' };
+ char nonstring_small[3] __nonstring = { 'a', 'b', 'c' };
char dest[sizeof(nonstring) + 1];
/* Copy in a non-NUL-terminated string into exactly right-sized dest. */
diff --git a/lib/test_bits.c b/lib/tests/test_bits.c
index c7b38d91e1f1..c7b38d91e1f1 100644
--- a/lib/test_bits.c
+++ b/lib/tests/test_bits.c
diff --git a/lib/test_fprobe.c b/lib/tests/test_fprobe.c
index cf92111b5c79..cf92111b5c79 100644
--- a/lib/test_fprobe.c
+++ b/lib/tests/test_fprobe.c
diff --git a/lib/test_hash.c b/lib/tests/test_hash.c
index a7af39662a0a..a7af39662a0a 100644
--- a/lib/test_hash.c
+++ b/lib/tests/test_hash.c
diff --git a/lib/test_kprobes.c b/lib/tests/test_kprobes.c
index b7582010125c..b7582010125c 100644
--- a/lib/test_kprobes.c
+++ b/lib/tests/test_kprobes.c
diff --git a/lib/test_linear_ranges.c b/lib/tests/test_linear_ranges.c
index f482be00f1bc..f482be00f1bc 100644
--- a/lib/test_linear_ranges.c
+++ b/lib/tests/test_linear_ranges.c
diff --git a/lib/test_list_sort.c b/lib/tests/test_list_sort.c
index 30879abc8a42..30879abc8a42 100644
--- a/lib/test_list_sort.c
+++ b/lib/tests/test_list_sort.c
diff --git a/lib/test_sort.c b/lib/tests/test_sort.c
index cd4a338d1153..cd4a338d1153 100644
--- a/lib/test_sort.c
+++ b/lib/tests/test_sort.c
diff --git a/lib/usercopy_kunit.c b/lib/tests/usercopy_kunit.c
index 77fa00a13df7..77fa00a13df7 100644
--- a/lib/usercopy_kunit.c
+++ b/lib/tests/usercopy_kunit.c
diff --git a/lib/util_macros_kunit.c b/lib/tests/util_macros_kunit.c
index 94cc9f0de50a..94cc9f0de50a 100644
--- a/lib/util_macros_kunit.c
+++ b/lib/tests/util_macros_kunit.c
diff --git a/lib/ubsan.c b/lib/ubsan.c
index a1c983d148f1..cdc1d31c3821 100644
--- a/lib/ubsan.c
+++ b/lib/ubsan.c
@@ -44,7 +44,7 @@ const char *report_ubsan_failure(struct pt_regs *regs, u32 check_type)
case ubsan_shift_out_of_bounds:
return "UBSAN: shift out of bounds";
#endif
-#if defined(CONFIG_UBSAN_DIV_ZERO) || defined(CONFIG_UBSAN_SIGNED_WRAP)
+#if defined(CONFIG_UBSAN_DIV_ZERO) || defined(CONFIG_UBSAN_INTEGER_WRAP)
/*
* SanitizerKind::IntegerDivideByZero and
* SanitizerKind::SignedIntegerOverflow emit
@@ -79,7 +79,7 @@ const char *report_ubsan_failure(struct pt_regs *regs, u32 check_type)
case ubsan_type_mismatch:
return "UBSAN: type mismatch";
#endif
-#ifdef CONFIG_UBSAN_SIGNED_WRAP
+#ifdef CONFIG_UBSAN_INTEGER_WRAP
/*
* SanitizerKind::SignedIntegerOverflow emits
* SanitizerHandler::AddOverflow, SanitizerHandler::SubOverflow,
@@ -303,6 +303,30 @@ void __ubsan_handle_negate_overflow(void *_data, void *old_val)
}
EXPORT_SYMBOL(__ubsan_handle_negate_overflow);
+void __ubsan_handle_implicit_conversion(void *_data, void *from_val, void *to_val)
+{
+ struct implicit_conversion_data *data = _data;
+ char from_val_str[VALUE_LENGTH];
+ char to_val_str[VALUE_LENGTH];
+
+ if (suppress_report(&data->location))
+ return;
+
+ val_to_string(from_val_str, sizeof(from_val_str), data->from_type, from_val);
+ val_to_string(to_val_str, sizeof(to_val_str), data->to_type, to_val);
+
+ ubsan_prologue(&data->location, "implicit-conversion");
+
+ pr_err("cannot represent %s value %s during %s %s, truncated to %s\n",
+ data->from_type->type_name,
+ from_val_str,
+ type_check_kinds[data->type_check_kind],
+ data->to_type->type_name,
+ to_val_str);
+
+ ubsan_epilogue();
+}
+EXPORT_SYMBOL(__ubsan_handle_implicit_conversion);
void __ubsan_handle_divrem_overflow(void *_data, void *lhs, void *rhs)
{
diff --git a/lib/ubsan.h b/lib/ubsan.h
index 07e37d4429b4..b37e22374e77 100644
--- a/lib/ubsan.h
+++ b/lib/ubsan.h
@@ -62,6 +62,13 @@ struct overflow_data {
struct type_descriptor *type;
};
+struct implicit_conversion_data {
+ struct source_location location;
+ struct type_descriptor *from_type;
+ struct type_descriptor *to_type;
+ unsigned char type_check_kind;
+};
+
struct type_mismatch_data {
struct source_location location;
struct type_descriptor *type;
@@ -142,6 +149,7 @@ void ubsan_linkage __ubsan_handle_sub_overflow(void *data, void *lhs, void *rhs)
void ubsan_linkage __ubsan_handle_mul_overflow(void *data, void *lhs, void *rhs);
void ubsan_linkage __ubsan_handle_negate_overflow(void *_data, void *old_val);
void ubsan_linkage __ubsan_handle_divrem_overflow(void *_data, void *lhs, void *rhs);
+void ubsan_linkage __ubsan_handle_implicit_conversion(void *_data, void *lhs, void *rhs);
void ubsan_linkage __ubsan_handle_type_mismatch(struct type_mismatch_data *data, void *ptr);
void ubsan_linkage __ubsan_handle_type_mismatch_v1(void *_data, void *ptr);
void ubsan_linkage __ubsan_handle_out_of_bounds(void *_data, void *index);
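For context on when the new handler fires: the implicit-conversion sanitizer instruments value-changing integer truncations. A user-space analogue, compiled with clang's -fsanitize=implicit-conversion, calls the matching compiler-rt handler and reports much like the in-kernel pr_err() above:

/* clang -fsanitize=implicit-conversion trunc.c */
#include <stdio.h>

int main(void)
{
	unsigned int wide = 0x12345;
	/*
	 * Implicit truncation: 0x12345 cannot be represented in an
	 * unsigned char, so the sanitizer reports the conversion.
	 */
	unsigned char narrow = wide;

	printf("%u\n", narrow);
	return 0;
}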
diff --git a/lib/vdso/Kconfig b/lib/vdso/Kconfig
index 82fe827af542..45df764b49ad 100644
--- a/lib/vdso/Kconfig
+++ b/lib/vdso/Kconfig
@@ -43,3 +43,8 @@ config VDSO_GETRANDOM
bool
help
Selected by architectures that support vDSO getrandom().
+
+config GENERIC_VDSO_DATA_STORE
+ bool
+ help
+ Selected by architectures that use the generic vDSO data store.
diff --git a/lib/vdso/Makefile b/lib/vdso/Makefile
index cedbf15f8087..aedd40aaa950 100644
--- a/lib/vdso/Makefile
+++ b/lib/vdso/Makefile
@@ -1,18 +1,3 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: GPL-2.0-only
-GENERIC_VDSO_MK_PATH := $(abspath $(lastword $(MAKEFILE_LIST)))
-GENERIC_VDSO_DIR := $(dir $(GENERIC_VDSO_MK_PATH))
-
-c-gettimeofday-$(CONFIG_GENERIC_GETTIMEOFDAY) := $(addprefix $(GENERIC_VDSO_DIR), gettimeofday.c)
-c-getrandom-$(CONFIG_VDSO_GETRANDOM) := $(addprefix $(GENERIC_VDSO_DIR), getrandom.c)
-
-# This cmd checks that the vdso library does not contain dynamic relocations.
-# It has to be called after the linking of the vdso library and requires it
-# as a parameter.
-#
-# As a workaround for some GNU ld ports which produce unneeded R_*_NONE
-# dynamic relocations, ignore R_*_NONE.
-quiet_cmd_vdso_check = VDSOCHK $@
- cmd_vdso_check = if $(READELF) -rW $@ | grep -v _NONE | grep -q " R_\w*_"; \
- then (echo >&2 "$@: dynamic relocations are not supported"; \
- rm -f $@; /bin/false); fi
+obj-$(CONFIG_GENERIC_VDSO_DATA_STORE) += datastore.o
diff --git a/lib/vdso/Makefile.include b/lib/vdso/Makefile.include
new file mode 100644
index 000000000000..cedbf15f8087
--- /dev/null
+++ b/lib/vdso/Makefile.include
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+
+GENERIC_VDSO_MK_PATH := $(abspath $(lastword $(MAKEFILE_LIST)))
+GENERIC_VDSO_DIR := $(dir $(GENERIC_VDSO_MK_PATH))
+
+c-gettimeofday-$(CONFIG_GENERIC_GETTIMEOFDAY) := $(addprefix $(GENERIC_VDSO_DIR), gettimeofday.c)
+c-getrandom-$(CONFIG_VDSO_GETRANDOM) := $(addprefix $(GENERIC_VDSO_DIR), getrandom.c)
+
+# This cmd checks that the vdso library does not contain dynamic relocations.
+# It has to be called after the linking of the vdso library and requires it
+# as a parameter.
+#
+# As a workaround for some GNU ld ports which produce unneeded R_*_NONE
+# dynamic relocations, ignore R_*_NONE.
+quiet_cmd_vdso_check = VDSOCHK $@
+ cmd_vdso_check = if $(READELF) -rW $@ | grep -v _NONE | grep -q " R_\w*_"; \
+ then (echo >&2 "$@: dynamic relocations are not supported"; \
+ rm -f $@; /bin/false); fi
diff --git a/lib/vdso/datastore.c b/lib/vdso/datastore.c
new file mode 100644
index 000000000000..c715e217ec65
--- /dev/null
+++ b/lib/vdso/datastore.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/linkage.h>
+#include <linux/mmap_lock.h>
+#include <linux/mm.h>
+#include <linux/time_namespace.h>
+#include <linux/types.h>
+#include <linux/vdso_datastore.h>
+#include <vdso/datapage.h>
+
+/*
+ * The vDSO data page.
+ */
+#ifdef CONFIG_HAVE_GENERIC_VDSO
+static union {
+ struct vdso_time_data data;
+ u8 page[PAGE_SIZE];
+} vdso_time_data_store __page_aligned_data;
+struct vdso_time_data *vdso_k_time_data = &vdso_time_data_store.data;
+static_assert(sizeof(vdso_time_data_store) == PAGE_SIZE);
+#endif /* CONFIG_HAVE_GENERIC_VDSO */
+
+#ifdef CONFIG_VDSO_GETRANDOM
+static union {
+ struct vdso_rng_data data;
+ u8 page[PAGE_SIZE];
+} vdso_rng_data_store __page_aligned_data;
+struct vdso_rng_data *vdso_k_rng_data = &vdso_rng_data_store.data;
+static_assert(sizeof(vdso_rng_data_store) == PAGE_SIZE);
+#endif /* CONFIG_VDSO_GETRANDOM */
+
+#ifdef CONFIG_ARCH_HAS_VDSO_ARCH_DATA
+static union {
+ struct vdso_arch_data data;
+ u8 page[VDSO_ARCH_DATA_SIZE];
+} vdso_arch_data_store __page_aligned_data;
+struct vdso_arch_data *vdso_k_arch_data = &vdso_arch_data_store.data;
+#endif /* CONFIG_ARCH_HAS_VDSO_ARCH_DATA */
+
+static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
+ struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct page *timens_page = find_timens_vvar_page(vma);
+ unsigned long addr, pfn;
+ vm_fault_t err;
+
+ switch (vmf->pgoff) {
+ case VDSO_TIME_PAGE_OFFSET:
+ if (!IS_ENABLED(CONFIG_HAVE_GENERIC_VDSO))
+ return VM_FAULT_SIGBUS;
+ pfn = __phys_to_pfn(__pa_symbol(vdso_k_time_data));
+ if (timens_page) {
+ /*
+ * Fault in VVAR page too, since it will be accessed
+ * to get clock data anyway.
+ */
+ addr = vmf->address + VDSO_TIMENS_PAGE_OFFSET * PAGE_SIZE;
+ err = vmf_insert_pfn(vma, addr, pfn);
+ if (unlikely(err & VM_FAULT_ERROR))
+ return err;
+ pfn = page_to_pfn(timens_page);
+ }
+ break;
+ case VDSO_TIMENS_PAGE_OFFSET:
+ /*
+ * If a task belongs to a time namespace then a namespace
+	 * specific VVAR page is mapped with the VDSO_TIME_PAGE_OFFSET and
+	 * the real VVAR page is mapped with the VDSO_TIMENS_PAGE_OFFSET
+ * offset.
+ * See also the comment near timens_setup_vdso_data().
+ */
+ if (!IS_ENABLED(CONFIG_TIME_NS) || !timens_page)
+ return VM_FAULT_SIGBUS;
+ pfn = __phys_to_pfn(__pa_symbol(vdso_k_time_data));
+ break;
+ case VDSO_RNG_PAGE_OFFSET:
+ if (!IS_ENABLED(CONFIG_VDSO_GETRANDOM))
+ return VM_FAULT_SIGBUS;
+ pfn = __phys_to_pfn(__pa_symbol(vdso_k_rng_data));
+ break;
+ case VDSO_ARCH_PAGES_START ... VDSO_ARCH_PAGES_END:
+ if (!IS_ENABLED(CONFIG_ARCH_HAS_VDSO_ARCH_DATA))
+ return VM_FAULT_SIGBUS;
+ pfn = __phys_to_pfn(__pa_symbol(vdso_k_arch_data)) +
+ vmf->pgoff - VDSO_ARCH_PAGES_START;
+ break;
+ default:
+ return VM_FAULT_SIGBUS;
+ }
+
+ return vmf_insert_pfn(vma, vmf->address, pfn);
+}
+
+const struct vm_special_mapping vdso_vvar_mapping = {
+ .name = "[vvar]",
+ .fault = vvar_fault,
+};
+
+struct vm_area_struct *vdso_install_vvar_mapping(struct mm_struct *mm, unsigned long addr)
+{
+ return _install_special_mapping(mm, addr, VDSO_NR_PAGES * PAGE_SIZE,
+ VM_READ | VM_MAYREAD | VM_IO | VM_DONTDUMP | VM_PFNMAP,
+ &vdso_vvar_mapping);
+}
+
+#ifdef CONFIG_TIME_NS
+/*
+ * The vvar page layout depends on whether a task belongs to the root or
+ * non-root time namespace. Whenever a task changes its namespace, the VVAR
+ * page tables are cleared and then they will be re-faulted with a
+ * corresponding layout.
+ * See also the comment near timens_setup_vdso_clock_data() for details.
+ */
+int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
+{
+ struct mm_struct *mm = task->mm;
+ struct vm_area_struct *vma;
+ VMA_ITERATOR(vmi, mm, 0);
+
+ mmap_read_lock(mm);
+ for_each_vma(vmi, vma) {
+ if (vma_is_special_mapping(vma, &vdso_vvar_mapping))
+ zap_vma_pages(vma);
+ }
+ mmap_read_unlock(mm);
+
+ return 0;
+}
+#endif
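A rough sketch of how an architecture might consume the new helper during vDSO setup (map_vdso() is a hypothetical arch function and error handling is simplified; only vdso_install_vvar_mapping() comes from this patch):

#include <linux/err.h>
#include <linux/vdso_datastore.h>

static int map_vdso(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;

	/* Map the shared vDSO data pages just below the vDSO text. */
	vma = vdso_install_vvar_mapping(mm, addr);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/* ... followed by _install_special_mapping() for the vDSO text ... */
	return 0;
}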
diff --git a/lib/vdso/getrandom.c b/lib/vdso/getrandom.c
index 938ca539aaa6..440f8a6203a6 100644
--- a/lib/vdso/getrandom.c
+++ b/lib/vdso/getrandom.c
@@ -12,6 +12,9 @@
#include <uapi/linux/mman.h>
#include <uapi/linux/random.h>
+/* Bring in default accessors */
+#include <vdso/vsyscall.h>
+
#undef PAGE_SIZE
#undef PAGE_MASK
#define PAGE_SIZE (1UL << CONFIG_PAGE_SHIFT)
@@ -152,7 +155,7 @@ retry_generation:
/*
* Prevent the syscall from being reordered wrt current_generation. Pairs with the
- * smp_store_release(&_vdso_rng_data.generation) in random.c.
+ * smp_store_release(&vdso_k_rng_data->generation) in random.c.
*/
smp_rmb();
@@ -256,5 +259,6 @@ fallback_syscall:
static __always_inline ssize_t
__cvdso_getrandom(void *buffer, size_t len, unsigned int flags, void *opaque_state, size_t opaque_len)
{
- return __cvdso_getrandom_data(__arch_get_vdso_rng_data(), buffer, len, flags, opaque_state, opaque_len);
+ return __cvdso_getrandom_data(__arch_get_vdso_u_rng_data(), buffer, len, flags,
+ opaque_state, opaque_len);
}
diff --git a/lib/vdso/gettimeofday.c b/lib/vdso/gettimeofday.c
index c01eaafd8041..93ef801a97ef 100644
--- a/lib/vdso/gettimeofday.c
+++ b/lib/vdso/gettimeofday.c
@@ -5,6 +5,9 @@
#include <vdso/datapage.h>
#include <vdso/helpers.h>
+/* Bring in default accessors */
+#include <vdso/vsyscall.h>
+
#ifndef vdso_calc_ns
#ifdef VDSO_DELTA_NOMASK
@@ -14,12 +17,12 @@
#endif
#ifdef CONFIG_GENERIC_VDSO_OVERFLOW_PROTECT
-static __always_inline bool vdso_delta_ok(const struct vdso_data *vd, u64 delta)
+static __always_inline bool vdso_delta_ok(const struct vdso_clock *vc, u64 delta)
{
- return delta < vd->max_cycles;
+ return delta < vc->max_cycles;
}
#else
-static __always_inline bool vdso_delta_ok(const struct vdso_data *vd, u64 delta)
+static __always_inline bool vdso_delta_ok(const struct vdso_clock *vc, u64 delta)
{
return true;
}
@@ -36,14 +39,14 @@ static __always_inline u64 vdso_shift_ns(u64 ns, u32 shift)
* Default implementation which works for all sane clocksources. That
* obviously excludes x86/TSC.
*/
-static __always_inline u64 vdso_calc_ns(const struct vdso_data *vd, u64 cycles, u64 base)
+static __always_inline u64 vdso_calc_ns(const struct vdso_clock *vc, u64 cycles, u64 base)
{
- u64 delta = (cycles - vd->cycle_last) & VDSO_DELTA_MASK(vd);
+ u64 delta = (cycles - vc->cycle_last) & VDSO_DELTA_MASK(vc);
- if (likely(vdso_delta_ok(vd, delta)))
- return vdso_shift_ns((delta * vd->mult) + base, vd->shift);
+ if (likely(vdso_delta_ok(vc, delta)))
+ return vdso_shift_ns((delta * vc->mult) + base, vc->shift);
- return mul_u64_u32_add_u64_shr(delta, vd->mult, base, vd->shift);
+ return mul_u64_u32_add_u64_shr(delta, vc->mult, base, vc->shift);
}
#endif /* vdso_calc_ns */
@@ -55,9 +58,9 @@ static inline bool __arch_vdso_hres_capable(void)
#endif
#ifndef vdso_clocksource_ok
-static inline bool vdso_clocksource_ok(const struct vdso_data *vd)
+static inline bool vdso_clocksource_ok(const struct vdso_clock *vc)
{
- return vd->clock_mode != VDSO_CLOCKMODE_NONE;
+ return vc->clock_mode != VDSO_CLOCKMODE_NONE;
}
#endif
@@ -69,36 +72,45 @@ static inline bool vdso_cycles_ok(u64 cycles)
#endif
#ifdef CONFIG_TIME_NS
-static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
- struct __kernel_timespec *ts)
+
+#ifdef CONFIG_GENERIC_VDSO_DATA_STORE
+static __always_inline
+const struct vdso_time_data *__arch_get_vdso_u_timens_data(const struct vdso_time_data *vd)
+{
+ return (void *)vd + PAGE_SIZE;
+}
+#endif /* CONFIG_GENERIC_VDSO_DATA_STORE */
+
+static __always_inline
+int do_hres_timens(const struct vdso_time_data *vdns, const struct vdso_clock *vcns,
+ clockid_t clk, struct __kernel_timespec *ts)
{
- const struct timens_offset *offs = &vdns->offset[clk];
+ const struct vdso_time_data *vd = __arch_get_vdso_u_timens_data(vdns);
+ const struct timens_offset *offs = &vcns->offset[clk];
+ const struct vdso_clock *vc = vd->clock_data;
const struct vdso_timestamp *vdso_ts;
- const struct vdso_data *vd;
u64 cycles, ns;
u32 seq;
s64 sec;
- vd = vdns - (clk == CLOCK_MONOTONIC_RAW ? CS_RAW : CS_HRES_COARSE);
- vd = __arch_get_timens_vdso_data(vd);
if (clk != CLOCK_MONOTONIC_RAW)
- vd = &vd[CS_HRES_COARSE];
+ vc = &vc[CS_HRES_COARSE];
else
- vd = &vd[CS_RAW];
- vdso_ts = &vd->basetime[clk];
+ vc = &vc[CS_RAW];
+ vdso_ts = &vc->basetime[clk];
do {
- seq = vdso_read_begin(vd);
+ seq = vdso_read_begin(vc);
- if (unlikely(!vdso_clocksource_ok(vd)))
+ if (unlikely(!vdso_clocksource_ok(vc)))
return -1;
- cycles = __arch_get_hw_counter(vd->clock_mode, vd);
+ cycles = __arch_get_hw_counter(vc->clock_mode, vd);
if (unlikely(!vdso_cycles_ok(cycles)))
return -1;
- ns = vdso_calc_ns(vd, cycles, vdso_ts->nsec);
+ ns = vdso_calc_ns(vc, cycles, vdso_ts->nsec);
sec = vdso_ts->sec;
- } while (unlikely(vdso_read_retry(vd, seq)));
+ } while (unlikely(vdso_read_retry(vc, seq)));
/* Add the namespace offset */
sec += offs->sec;
@@ -115,22 +127,24 @@ static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_
}
#else
static __always_inline
-const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd)
+const struct vdso_time_data *__arch_get_vdso_u_timens_data(const struct vdso_time_data *vd)
{
return NULL;
}
-static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
- struct __kernel_timespec *ts)
+static __always_inline
+int do_hres_timens(const struct vdso_time_data *vdns, const struct vdso_clock *vcns,
+ clockid_t clk, struct __kernel_timespec *ts)
{
return -EINVAL;
}
#endif
-static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
- struct __kernel_timespec *ts)
+static __always_inline
+int do_hres(const struct vdso_time_data *vd, const struct vdso_clock *vc,
+ clockid_t clk, struct __kernel_timespec *ts)
{
- const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
+ const struct vdso_timestamp *vdso_ts = &vc->basetime[clk];
u64 cycles, sec, ns;
u32 seq;
@@ -142,31 +156,31 @@ static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
/*
* Open coded function vdso_read_begin() to handle
* VDSO_CLOCKMODE_TIMENS. Time namespace enabled tasks have a
- * special VVAR page installed which has vd->seq set to 1 and
- * vd->clock_mode set to VDSO_CLOCKMODE_TIMENS. For non time
+ * special VVAR page installed which has vc->seq set to 1 and
+ * vc->clock_mode set to VDSO_CLOCKMODE_TIMENS. For non time
* namespace affected tasks this does not affect performance
- * because if vd->seq is odd, i.e. a concurrent update is in
- * progress the extra check for vd->clock_mode is just a few
- * extra instructions while spin waiting for vd->seq to become
+ * because if vc->seq is odd, i.e. a concurrent update is in
+ * progress the extra check for vc->clock_mode is just a few
+ * extra instructions while spin waiting for vc->seq to become
* even again.
*/
- while (unlikely((seq = READ_ONCE(vd->seq)) & 1)) {
+ while (unlikely((seq = READ_ONCE(vc->seq)) & 1)) {
if (IS_ENABLED(CONFIG_TIME_NS) &&
- vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
- return do_hres_timens(vd, clk, ts);
+ vc->clock_mode == VDSO_CLOCKMODE_TIMENS)
+ return do_hres_timens(vd, vc, clk, ts);
cpu_relax();
}
smp_rmb();
- if (unlikely(!vdso_clocksource_ok(vd)))
+ if (unlikely(!vdso_clocksource_ok(vc)))
return -1;
- cycles = __arch_get_hw_counter(vd->clock_mode, vd);
+ cycles = __arch_get_hw_counter(vc->clock_mode, vd);
if (unlikely(!vdso_cycles_ok(cycles)))
return -1;
- ns = vdso_calc_ns(vd, cycles, vdso_ts->nsec);
+ ns = vdso_calc_ns(vc, cycles, vdso_ts->nsec);
sec = vdso_ts->sec;
- } while (unlikely(vdso_read_retry(vd, seq)));
+ } while (unlikely(vdso_read_retry(vc, seq)));
/*
* Do this outside the loop: a race inside the loop could result
@@ -179,21 +193,25 @@ static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
}
#ifdef CONFIG_TIME_NS
-static __always_inline int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
- struct __kernel_timespec *ts)
+static __always_inline
+int do_coarse_timens(const struct vdso_time_data *vdns, const struct vdso_clock *vcns,
+ clockid_t clk, struct __kernel_timespec *ts)
{
- const struct vdso_data *vd = __arch_get_timens_vdso_data(vdns);
- const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
- const struct timens_offset *offs = &vdns->offset[clk];
+ const struct vdso_time_data *vd = __arch_get_vdso_u_timens_data(vdns);
+ const struct timens_offset *offs = &vcns->offset[clk];
+ const struct vdso_clock *vc = vd->clock_data;
+ const struct vdso_timestamp *vdso_ts;
u64 nsec;
s64 sec;
s32 seq;
+ vdso_ts = &vc->basetime[clk];
+
do {
- seq = vdso_read_begin(vd);
+ seq = vdso_read_begin(vc);
sec = vdso_ts->sec;
nsec = vdso_ts->nsec;
- } while (unlikely(vdso_read_retry(vd, seq)));
+ } while (unlikely(vdso_read_retry(vc, seq)));
/* Add the namespace offset */
sec += offs->sec;
@@ -208,17 +226,19 @@ static __always_inline int do_coarse_timens(const struct vdso_data *vdns, clocki
return 0;
}
#else
-static __always_inline int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
- struct __kernel_timespec *ts)
+static __always_inline
+int do_coarse_timens(const struct vdso_time_data *vdns, const struct vdso_clock *vcns,
+ clockid_t clk, struct __kernel_timespec *ts)
{
return -1;
}
#endif
-static __always_inline int do_coarse(const struct vdso_data *vd, clockid_t clk,
- struct __kernel_timespec *ts)
+static __always_inline
+int do_coarse(const struct vdso_time_data *vd, const struct vdso_clock *vc,
+ clockid_t clk, struct __kernel_timespec *ts)
{
- const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
+ const struct vdso_timestamp *vdso_ts = &vc->basetime[clk];
u32 seq;
do {
@@ -226,25 +246,26 @@ static __always_inline int do_coarse(const struct vdso_data *vd, clockid_t clk,
* Open coded function vdso_read_begin() to handle
* VDSO_CLOCK_TIMENS. See comment in do_hres().
*/
- while ((seq = READ_ONCE(vd->seq)) & 1) {
+ while ((seq = READ_ONCE(vc->seq)) & 1) {
if (IS_ENABLED(CONFIG_TIME_NS) &&
- vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
- return do_coarse_timens(vd, clk, ts);
+ vc->clock_mode == VDSO_CLOCKMODE_TIMENS)
+ return do_coarse_timens(vd, vc, clk, ts);
cpu_relax();
}
smp_rmb();
ts->tv_sec = vdso_ts->sec;
ts->tv_nsec = vdso_ts->nsec;
- } while (unlikely(vdso_read_retry(vd, seq)));
+ } while (unlikely(vdso_read_retry(vc, seq)));
return 0;
}
static __always_inline int
-__cvdso_clock_gettime_common(const struct vdso_data *vd, clockid_t clock,
+__cvdso_clock_gettime_common(const struct vdso_time_data *vd, clockid_t clock,
struct __kernel_timespec *ts)
{
+ const struct vdso_clock *vc = vd->clock_data;
u32 msk;
/* Check for negative values or invalid clocks */
@@ -257,19 +278,19 @@ __cvdso_clock_gettime_common(const struct vdso_data *vd, clockid_t clock,
*/
msk = 1U << clock;
if (likely(msk & VDSO_HRES))
- vd = &vd[CS_HRES_COARSE];
+ vc = &vc[CS_HRES_COARSE];
else if (msk & VDSO_COARSE)
- return do_coarse(&vd[CS_HRES_COARSE], clock, ts);
+ return do_coarse(vd, &vc[CS_HRES_COARSE], clock, ts);
else if (msk & VDSO_RAW)
- vd = &vd[CS_RAW];
+ vc = &vc[CS_RAW];
else
return -1;
- return do_hres(vd, clock, ts);
+ return do_hres(vd, vc, clock, ts);
}
static __maybe_unused int
-__cvdso_clock_gettime_data(const struct vdso_data *vd, clockid_t clock,
+__cvdso_clock_gettime_data(const struct vdso_time_data *vd, clockid_t clock,
struct __kernel_timespec *ts)
{
int ret = __cvdso_clock_gettime_common(vd, clock, ts);
@@ -282,12 +303,12 @@ __cvdso_clock_gettime_data(const struct vdso_data *vd, clockid_t clock,
static __maybe_unused int
__cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
{
- return __cvdso_clock_gettime_data(__arch_get_vdso_data(), clock, ts);
+ return __cvdso_clock_gettime_data(__arch_get_vdso_u_time_data(), clock, ts);
}
#ifdef BUILD_VDSO32
static __maybe_unused int
-__cvdso_clock_gettime32_data(const struct vdso_data *vd, clockid_t clock,
+__cvdso_clock_gettime32_data(const struct vdso_time_data *vd, clockid_t clock,
struct old_timespec32 *res)
{
struct __kernel_timespec ts;
@@ -308,19 +329,20 @@ __cvdso_clock_gettime32_data(const struct vdso_data *vd, clockid_t clock,
static __maybe_unused int
__cvdso_clock_gettime32(clockid_t clock, struct old_timespec32 *res)
{
- return __cvdso_clock_gettime32_data(__arch_get_vdso_data(), clock, res);
+ return __cvdso_clock_gettime32_data(__arch_get_vdso_u_time_data(), clock, res);
}
#endif /* BUILD_VDSO32 */
static __maybe_unused int
-__cvdso_gettimeofday_data(const struct vdso_data *vd,
+__cvdso_gettimeofday_data(const struct vdso_time_data *vd,
struct __kernel_old_timeval *tv, struct timezone *tz)
{
+ const struct vdso_clock *vc = vd->clock_data;
if (likely(tv != NULL)) {
struct __kernel_timespec ts;
- if (do_hres(&vd[CS_HRES_COARSE], CLOCK_REALTIME, &ts))
+ if (do_hres(vd, &vc[CS_HRES_COARSE], CLOCK_REALTIME, &ts))
return gettimeofday_fallback(tv, tz);
tv->tv_sec = ts.tv_sec;
@@ -329,8 +351,8 @@ __cvdso_gettimeofday_data(const struct vdso_data *vd,
if (unlikely(tz != NULL)) {
if (IS_ENABLED(CONFIG_TIME_NS) &&
- vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
- vd = __arch_get_timens_vdso_data(vd);
+ vc->clock_mode == VDSO_CLOCKMODE_TIMENS)
+ vd = __arch_get_vdso_u_timens_data(vd);
tz->tz_minuteswest = vd[CS_HRES_COARSE].tz_minuteswest;
tz->tz_dsttime = vd[CS_HRES_COARSE].tz_dsttime;
@@ -342,20 +364,23 @@ __cvdso_gettimeofday_data(const struct vdso_data *vd,
static __maybe_unused int
__cvdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
{
- return __cvdso_gettimeofday_data(__arch_get_vdso_data(), tv, tz);
+ return __cvdso_gettimeofday_data(__arch_get_vdso_u_time_data(), tv, tz);
}
#ifdef VDSO_HAS_TIME
static __maybe_unused __kernel_old_time_t
-__cvdso_time_data(const struct vdso_data *vd, __kernel_old_time_t *time)
+__cvdso_time_data(const struct vdso_time_data *vd, __kernel_old_time_t *time)
{
+ const struct vdso_clock *vc = vd->clock_data;
__kernel_old_time_t t;
if (IS_ENABLED(CONFIG_TIME_NS) &&
- vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
- vd = __arch_get_timens_vdso_data(vd);
+ vc->clock_mode == VDSO_CLOCKMODE_TIMENS) {
+ vd = __arch_get_vdso_u_timens_data(vd);
+ vc = vd->clock_data;
+ }
- t = READ_ONCE(vd[CS_HRES_COARSE].basetime[CLOCK_REALTIME].sec);
+ t = READ_ONCE(vc[CS_HRES_COARSE].basetime[CLOCK_REALTIME].sec);
if (time)
*time = t;
@@ -365,15 +390,16 @@ __cvdso_time_data(const struct vdso_data *vd, __kernel_old_time_t *time)
static __maybe_unused __kernel_old_time_t __cvdso_time(__kernel_old_time_t *time)
{
- return __cvdso_time_data(__arch_get_vdso_data(), time);
+ return __cvdso_time_data(__arch_get_vdso_u_time_data(), time);
}
#endif /* VDSO_HAS_TIME */
#ifdef VDSO_HAS_CLOCK_GETRES
static __maybe_unused
-int __cvdso_clock_getres_common(const struct vdso_data *vd, clockid_t clock,
+int __cvdso_clock_getres_common(const struct vdso_time_data *vd, clockid_t clock,
struct __kernel_timespec *res)
{
+ const struct vdso_clock *vc = vd->clock_data;
u32 msk;
u64 ns;
@@ -382,8 +408,8 @@ int __cvdso_clock_getres_common(const struct vdso_data *vd, clockid_t clock,
return -1;
if (IS_ENABLED(CONFIG_TIME_NS) &&
- vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
- vd = __arch_get_timens_vdso_data(vd);
+ vc->clock_mode == VDSO_CLOCKMODE_TIMENS)
+ vd = __arch_get_vdso_u_timens_data(vd);
/*
* Convert the clockid to a bitmask and use it to check which
@@ -394,7 +420,7 @@ int __cvdso_clock_getres_common(const struct vdso_data *vd, clockid_t clock,
/*
* Preserves the behaviour of posix_get_hrtimer_res().
*/
- ns = READ_ONCE(vd[CS_HRES_COARSE].hrtimer_res);
+ ns = READ_ONCE(vd->hrtimer_res);
} else if (msk & VDSO_COARSE) {
/*
* Preserves the behaviour of posix_get_coarse_res().
@@ -412,7 +438,7 @@ int __cvdso_clock_getres_common(const struct vdso_data *vd, clockid_t clock,
}
static __maybe_unused
-int __cvdso_clock_getres_data(const struct vdso_data *vd, clockid_t clock,
+int __cvdso_clock_getres_data(const struct vdso_time_data *vd, clockid_t clock,
struct __kernel_timespec *res)
{
int ret = __cvdso_clock_getres_common(vd, clock, res);
@@ -425,12 +451,12 @@ int __cvdso_clock_getres_data(const struct vdso_data *vd, clockid_t clock,
static __maybe_unused
int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
{
- return __cvdso_clock_getres_data(__arch_get_vdso_data(), clock, res);
+ return __cvdso_clock_getres_data(__arch_get_vdso_u_time_data(), clock, res);
}
#ifdef BUILD_VDSO32
static __maybe_unused int
-__cvdso_clock_getres_time32_data(const struct vdso_data *vd, clockid_t clock,
+__cvdso_clock_getres_time32_data(const struct vdso_time_data *vd, clockid_t clock,
struct old_timespec32 *res)
{
struct __kernel_timespec ts;
@@ -451,7 +477,7 @@ __cvdso_clock_getres_time32_data(const struct vdso_data *vd, clockid_t clock,
static __maybe_unused int
__cvdso_clock_getres_time32(clockid_t clock, struct old_timespec32 *res)
{
- return __cvdso_clock_getres_time32_data(__arch_get_vdso_data(),
+ return __cvdso_clock_getres_time32_data(__arch_get_vdso_u_time_data(),
clock, res);
}
#endif /* BUILD_VDSO32 */
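The do_hres()/do_coarse() loops above are instances of the usual sequence-counter reader pattern: wait for an even sequence, take a snapshot, and retry if the counter moved. A condensed sketch, assuming the vdso_read_begin()/vdso_read_retry() helpers take the per-clock vdso_clock pointer as in this series (read_snapshot() is illustrative only):

/* Reader side of the sequence counter, stripped of the timens special case. */
static u64 read_snapshot(const struct vdso_clock *vc,
			 const struct vdso_timestamp *ts)
{
	u64 sec, nsec;
	u32 seq;

	do {
		seq = vdso_read_begin(vc);	/* spins while seq is odd */
		sec = ts->sec;
		nsec = ts->nsec;
	} while (vdso_read_retry(vc, seq));	/* true if seq changed */

	return sec * NSEC_PER_SEC + nsec;
}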
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 56fe96319292..734bd70c8b9b 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -114,6 +114,13 @@ unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base)
}
EXPORT_SYMBOL(simple_strtoul);
+unsigned long simple_strntoul(const char *cp, char **endp, unsigned int base,
+ size_t max_chars)
+{
+ return simple_strntoull(cp, endp, base, max_chars);
+}
+EXPORT_SYMBOL(simple_strntoul);
+
/**
* simple_strtol - convert a string to a signed long
* @cp: The start of the string
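A hedged usage sketch for the newly exported simple_strntoul(): it parses at most max_chars characters, mirroring simple_strntoull(). parse_port_field() below is a hypothetical caller, not kernel code:

static unsigned long parse_port_field(const char *buf)
{
	char *end;
	/* Only consider the first 5 characters of the buffer. */
	unsigned long port = simple_strntoul(buf, &end, 10, 5);

	pr_debug("parsed %lu, stopped at offset %td\n", port, end - buf);
	return port;
}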
diff --git a/lib/zstd/Makefile b/lib/zstd/Makefile
index 20f08c644b71..be218b5e0ed5 100644
--- a/lib/zstd/Makefile
+++ b/lib/zstd/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
# ################################################################
-# Copyright (c) Facebook, Inc.
+# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under both the BSD-style license (found in the
@@ -26,6 +26,7 @@ zstd_compress-y := \
compress/zstd_lazy.o \
compress/zstd_ldm.o \
compress/zstd_opt.o \
+ compress/zstd_preSplit.o \
zstd_decompress-y := \
zstd_decompress_module.o \
diff --git a/lib/zstd/common/allocations.h b/lib/zstd/common/allocations.h
new file mode 100644
index 000000000000..16c3d08e8d1a
--- /dev/null
+++ b/lib/zstd/common/allocations.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/* This file provides custom allocation primitives
+ */
+
+#define ZSTD_DEPS_NEED_MALLOC
+#include "zstd_deps.h" /* ZSTD_malloc, ZSTD_calloc, ZSTD_free, ZSTD_memset */
+
+#include "compiler.h" /* MEM_STATIC */
+#define ZSTD_STATIC_LINKING_ONLY
+#include <linux/zstd.h> /* ZSTD_customMem */
+
+#ifndef ZSTD_ALLOCATIONS_H
+#define ZSTD_ALLOCATIONS_H
+
+/* custom memory allocation functions */
+
+MEM_STATIC void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem)
+{
+ if (customMem.customAlloc)
+ return customMem.customAlloc(customMem.opaque, size);
+ return ZSTD_malloc(size);
+}
+
+MEM_STATIC void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem)
+{
+ if (customMem.customAlloc) {
+ /* calloc implemented as malloc+memset;
+ * not as efficient as calloc, but next best guess for custom malloc */
+ void* const ptr = customMem.customAlloc(customMem.opaque, size);
+ ZSTD_memset(ptr, 0, size);
+ return ptr;
+ }
+ return ZSTD_calloc(1, size);
+}
+
+MEM_STATIC void ZSTD_customFree(void* ptr, ZSTD_customMem customMem)
+{
+ if (ptr!=NULL) {
+ if (customMem.customFree)
+ customMem.customFree(customMem.opaque, ptr);
+ else
+ ZSTD_free(ptr);
+ }
+}
+
+#endif /* ZSTD_ALLOCATIONS_H */
diff --git a/lib/zstd/common/bits.h b/lib/zstd/common/bits.h
new file mode 100644
index 000000000000..c5faaa3d7b08
--- /dev/null
+++ b/lib/zstd/common/bits.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_BITS_H
+#define ZSTD_BITS_H
+
+#include "mem.h"
+
+MEM_STATIC unsigned ZSTD_countTrailingZeros32_fallback(U32 val)
+{
+ assert(val != 0);
+ {
+ static const U32 DeBruijnBytePos[32] = {0, 1, 28, 2, 29, 14, 24, 3,
+ 30, 22, 20, 15, 25, 17, 4, 8,
+ 31, 27, 13, 23, 21, 19, 16, 7,
+ 26, 12, 18, 6, 11, 5, 10, 9};
+ return DeBruijnBytePos[((U32) ((val & -(S32) val) * 0x077CB531U)) >> 27];
+ }
+}
+
+MEM_STATIC unsigned ZSTD_countTrailingZeros32(U32 val)
+{
+ assert(val != 0);
+#if (__GNUC__ >= 4)
+ return (unsigned)__builtin_ctz(val);
+#else
+ return ZSTD_countTrailingZeros32_fallback(val);
+#endif
+}
+
+MEM_STATIC unsigned ZSTD_countLeadingZeros32_fallback(U32 val)
+{
+ assert(val != 0);
+ {
+ static const U32 DeBruijnClz[32] = {0, 9, 1, 10, 13, 21, 2, 29,
+ 11, 14, 16, 18, 22, 25, 3, 30,
+ 8, 12, 20, 28, 15, 17, 24, 7,
+ 19, 27, 23, 6, 26, 5, 4, 31};
+ val |= val >> 1;
+ val |= val >> 2;
+ val |= val >> 4;
+ val |= val >> 8;
+ val |= val >> 16;
+ return 31 - DeBruijnClz[(val * 0x07C4ACDDU) >> 27];
+ }
+}
+
+MEM_STATIC unsigned ZSTD_countLeadingZeros32(U32 val)
+{
+ assert(val != 0);
+#if (__GNUC__ >= 4)
+ return (unsigned)__builtin_clz(val);
+#else
+ return ZSTD_countLeadingZeros32_fallback(val);
+#endif
+}
+
+MEM_STATIC unsigned ZSTD_countTrailingZeros64(U64 val)
+{
+ assert(val != 0);
+#if (__GNUC__ >= 4) && defined(__LP64__)
+ return (unsigned)__builtin_ctzll(val);
+#else
+ {
+ U32 mostSignificantWord = (U32)(val >> 32);
+ U32 leastSignificantWord = (U32)val;
+ if (leastSignificantWord == 0) {
+ return 32 + ZSTD_countTrailingZeros32(mostSignificantWord);
+ } else {
+ return ZSTD_countTrailingZeros32(leastSignificantWord);
+ }
+ }
+#endif
+}
+
+MEM_STATIC unsigned ZSTD_countLeadingZeros64(U64 val)
+{
+ assert(val != 0);
+#if (__GNUC__ >= 4)
+ return (unsigned)(__builtin_clzll(val));
+#else
+ {
+ U32 mostSignificantWord = (U32)(val >> 32);
+ U32 leastSignificantWord = (U32)val;
+ if (mostSignificantWord == 0) {
+ return 32 + ZSTD_countLeadingZeros32(leastSignificantWord);
+ } else {
+ return ZSTD_countLeadingZeros32(mostSignificantWord);
+ }
+ }
+#endif
+}
+
+MEM_STATIC unsigned ZSTD_NbCommonBytes(size_t val)
+{
+ if (MEM_isLittleEndian()) {
+ if (MEM_64bits()) {
+ return ZSTD_countTrailingZeros64((U64)val) >> 3;
+ } else {
+ return ZSTD_countTrailingZeros32((U32)val) >> 3;
+ }
+ } else { /* Big Endian CPU */
+ if (MEM_64bits()) {
+ return ZSTD_countLeadingZeros64((U64)val) >> 3;
+ } else {
+ return ZSTD_countLeadingZeros32((U32)val) >> 3;
+ }
+ }
+}
+
+MEM_STATIC unsigned ZSTD_highbit32(U32 val) /* compress, dictBuilder, decodeCorpus */
+{
+ assert(val != 0);
+ return 31 - ZSTD_countLeadingZeros32(val);
+}
+
+/* ZSTD_rotateRight_*():
+ * Rotates a bitfield to the right by "count" bits.
+ * https://en.wikipedia.org/w/index.php?title=Circular_shift&oldid=991635599#Implementing_circular_shifts
+ */
+MEM_STATIC
+U64 ZSTD_rotateRight_U64(U64 const value, U32 count) {
+ assert(count < 64);
+ count &= 0x3F; /* for fickle pattern recognition */
+ return (value >> count) | (U64)(value << ((0U - count) & 0x3F));
+}
+
+MEM_STATIC
+U32 ZSTD_rotateRight_U32(U32 const value, U32 count) {
+ assert(count < 32);
+ count &= 0x1F; /* for fickle pattern recognition */
+ return (value >> count) | (U32)(value << ((0U - count) & 0x1F));
+}
+
+MEM_STATIC
+U16 ZSTD_rotateRight_U16(U16 const value, U32 count) {
+ assert(count < 16);
+ count &= 0x0F; /* for fickle pattern recognition */
+ return (value >> count) | (U16)(value << ((0U - count) & 0x0F));
+}
+
+#endif /* ZSTD_BITS_H */
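The 32-bit fallback above is the classic De Bruijn multiply-and-shift count-trailing-zeros. A stand-alone user-space check (plain C, illustrative only) that the same table and constant agree with the compiler builtin:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static unsigned ctz32_debruijn(uint32_t val)
{
	static const uint32_t table[32] = {
		0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
		31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
	};
	/*
	 * Isolate the lowest set bit, multiply by the De Bruijn constant,
	 * and use the top five bits as a table index.
	 */
	return table[((uint32_t)((val & -(int32_t)val) * 0x077CB531U)) >> 27];
}

int main(void)
{
	for (uint32_t v = 1; v < (1u << 20); v++)
		assert(ctz32_debruijn(v) == (unsigned)__builtin_ctz(v));
	puts("De Bruijn CTZ matches __builtin_ctz");
	return 0;
}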
diff --git a/lib/zstd/common/bitstream.h b/lib/zstd/common/bitstream.h
index feef3a1b1d60..86439da0eea7 100644
--- a/lib/zstd/common/bitstream.h
+++ b/lib/zstd/common/bitstream.h
@@ -1,7 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/* ******************************************************************
* bitstream
* Part of FSE library
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
*
* You can contact the author at :
* - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
@@ -27,7 +28,7 @@
#include "compiler.h" /* UNLIKELY() */
#include "debug.h" /* assert(), DEBUGLOG(), RAWLOG() */
#include "error_private.h" /* error codes and messages */
-
+#include "bits.h" /* ZSTD_highbit32 */
/*=========================================
* Target specific
@@ -41,12 +42,13 @@
/*-******************************************
* bitStream encoding API (write forward)
********************************************/
+typedef size_t BitContainerType;
/* bitStream can mix input from multiple sources.
* A critical property of these streams is that they encode and decode in **reverse** direction.
* So the first bit sequence you add will be the last to be read, like a LIFO stack.
*/
typedef struct {
- size_t bitContainer;
+ BitContainerType bitContainer;
unsigned bitPos;
char* startPtr;
char* ptr;
@@ -54,7 +56,7 @@ typedef struct {
} BIT_CStream_t;
MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, void* dstBuffer, size_t dstCapacity);
-MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, size_t value, unsigned nbBits);
+MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, BitContainerType value, unsigned nbBits);
MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC);
MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC);
@@ -63,7 +65,7 @@ MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC);
* `dstCapacity` must be >= sizeof(bitD->bitContainer), otherwise @return will be an error code.
*
* bits are first added to a local register.
-* Local register is size_t, hence 64-bits on 64-bits systems, or 32-bits on 32-bits systems.
+* Local register is BitContainerType, 64-bits on 64-bits systems, or 32-bits on 32-bits systems.
* Writing data into memory is an explicit operation, performed by the flushBits function.
* Hence keep track how many bits are potentially stored into local register to avoid register overflow.
* After a flushBits, a maximum of 7 bits might still be stored into local register.
@@ -80,28 +82,28 @@ MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC);
* bitStream decoding API (read backward)
**********************************************/
typedef struct {
- size_t bitContainer;
+ BitContainerType bitContainer;
unsigned bitsConsumed;
const char* ptr;
const char* start;
const char* limitPtr;
} BIT_DStream_t;
-typedef enum { BIT_DStream_unfinished = 0,
- BIT_DStream_endOfBuffer = 1,
- BIT_DStream_completed = 2,
- BIT_DStream_overflow = 3 } BIT_DStream_status; /* result of BIT_reloadDStream() */
- /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */
+typedef enum { BIT_DStream_unfinished = 0, /* fully refilled */
+ BIT_DStream_endOfBuffer = 1, /* still some bits left in bitstream */
+ BIT_DStream_completed = 2, /* bitstream entirely consumed, bit-exact */
+ BIT_DStream_overflow = 3 /* user requested more bits than present in bitstream */
+ } BIT_DStream_status; /* result of BIT_reloadDStream() */
MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
-MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits);
+MEM_STATIC BitContainerType BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits);
MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD);
MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD);
/* Start by invoking BIT_initDStream().
* A chunk of the bitStream is then stored into a local register.
-* Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t).
+* Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (BitContainerType).
* You can then retrieve bitFields stored into the local register, **in reverse order**.
* Local register is explicitly reloaded from memory by the BIT_reloadDStream() method.
* A reload guarantee a minimum of ((8*sizeof(bitD->bitContainer))-7) bits when its result is BIT_DStream_unfinished.
@@ -113,7 +115,7 @@ MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD);
/*-****************************************
* unsafe API
******************************************/
-MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, size_t value, unsigned nbBits);
+MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, BitContainerType value, unsigned nbBits);
/* faster, but works only if value is "clean", meaning all high bits above nbBits are 0 */
MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC);
@@ -122,33 +124,6 @@ MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC);
MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits);
/* faster, but works only if nbBits >= 1 */
-
-
-/*-**************************************************************
-* Internal functions
-****************************************************************/
-MEM_STATIC unsigned BIT_highbit32 (U32 val)
-{
- assert(val != 0);
- {
-# if (__GNUC__ >= 3) /* Use GCC Intrinsic */
- return __builtin_clz (val) ^ 31;
-# else /* Software version */
- static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29,
- 11, 14, 16, 18, 22, 25, 3, 30,
- 8, 12, 20, 28, 15, 17, 24, 7,
- 19, 27, 23, 6, 26, 5, 4, 31 };
- U32 v = val;
- v |= v >> 1;
- v |= v >> 2;
- v |= v >> 4;
- v |= v >> 8;
- v |= v >> 16;
- return DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
-# endif
- }
-}
-
/*===== Local Constants =====*/
static const unsigned BIT_mask[] = {
0, 1, 3, 7, 0xF, 0x1F,
@@ -178,16 +153,22 @@ MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC,
return 0;
}
+FORCE_INLINE_TEMPLATE BitContainerType BIT_getLowerBits(BitContainerType bitContainer, U32 const nbBits)
+{
+ assert(nbBits < BIT_MASK_SIZE);
+ return bitContainer & BIT_mask[nbBits];
+}
+
/*! BIT_addBits() :
* can add up to 31 bits into `bitC`.
* Note : does not check for register overflow ! */
MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC,
- size_t value, unsigned nbBits)
+ BitContainerType value, unsigned nbBits)
{
DEBUG_STATIC_ASSERT(BIT_MASK_SIZE == 32);
assert(nbBits < BIT_MASK_SIZE);
assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);
- bitC->bitContainer |= (value & BIT_mask[nbBits]) << bitC->bitPos;
+ bitC->bitContainer |= BIT_getLowerBits(value, nbBits) << bitC->bitPos;
bitC->bitPos += nbBits;
}
@@ -195,7 +176,7 @@ MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC,
* works only if `value` is _clean_,
* meaning all high bits above nbBits are 0 */
MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC,
- size_t value, unsigned nbBits)
+ BitContainerType value, unsigned nbBits)
{
assert((value>>nbBits) == 0);
assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);
@@ -242,7 +223,7 @@ MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC)
BIT_addBitsFast(bitC, 1, 1); /* endMark */
BIT_flushBits(bitC);
if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */
- return (bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0);
+ return (size_t)(bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0);
}
@@ -266,35 +247,35 @@ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, si
bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer);
bitD->bitContainer = MEM_readLEST(bitD->ptr);
{ BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
- bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0; /* ensures bitsConsumed is always set */
+ bitD->bitsConsumed = lastByte ? 8 - ZSTD_highbit32(lastByte) : 0; /* ensures bitsConsumed is always set */
if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ }
} else {
bitD->ptr = bitD->start;
bitD->bitContainer = *(const BYTE*)(bitD->start);
switch(srcSize)
{
- case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);
+ case 7: bitD->bitContainer += (BitContainerType)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);
ZSTD_FALLTHROUGH;
- case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);
+ case 6: bitD->bitContainer += (BitContainerType)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);
ZSTD_FALLTHROUGH;
- case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);
+ case 5: bitD->bitContainer += (BitContainerType)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);
ZSTD_FALLTHROUGH;
- case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24;
+ case 4: bitD->bitContainer += (BitContainerType)(((const BYTE*)(srcBuffer))[3]) << 24;
ZSTD_FALLTHROUGH;
- case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16;
+ case 3: bitD->bitContainer += (BitContainerType)(((const BYTE*)(srcBuffer))[2]) << 16;
ZSTD_FALLTHROUGH;
- case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) << 8;
+ case 2: bitD->bitContainer += (BitContainerType)(((const BYTE*)(srcBuffer))[1]) << 8;
ZSTD_FALLTHROUGH;
default: break;
}
{ BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
- bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;
+ bitD->bitsConsumed = lastByte ? 8 - ZSTD_highbit32(lastByte) : 0;
if (lastByte == 0) return ERROR(corruption_detected); /* endMark not present */
}
bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8;
@@ -303,12 +284,12 @@ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, si
return srcSize;
}
-MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getUpperBits(size_t bitContainer, U32 const start)
+FORCE_INLINE_TEMPLATE BitContainerType BIT_getUpperBits(BitContainerType bitContainer, U32 const start)
{
return bitContainer >> start;
}
-MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits)
+FORCE_INLINE_TEMPLATE BitContainerType BIT_getMiddleBits(BitContainerType bitContainer, U32 const start, U32 const nbBits)
{
U32 const regMask = sizeof(bitContainer)*8 - 1;
/* if start > regMask, bitstream is corrupted, and result is undefined */
@@ -318,26 +299,20 @@ MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getMiddleBits(size_t bitContainer, U32 c
* such cpus old (pre-Haswell, 2013) and their performance is not of that
* importance.
*/
-#if defined(__x86_64__) || defined(_M_X86)
+#if defined(__x86_64__) || defined(_M_X64)
return (bitContainer >> (start & regMask)) & ((((U64)1) << nbBits) - 1);
#else
return (bitContainer >> (start & regMask)) & BIT_mask[nbBits];
#endif
}
-MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits)
-{
- assert(nbBits < BIT_MASK_SIZE);
- return bitContainer & BIT_mask[nbBits];
-}
-
/*! BIT_lookBits() :
* Provides next n bits from local register.
* local register is not modified.
* On 32-bits, maxNbBits==24.
* On 64-bits, maxNbBits==56.
* @return : value extracted */
-MEM_STATIC FORCE_INLINE_ATTR size_t BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits)
+FORCE_INLINE_TEMPLATE BitContainerType BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits)
{
/* arbitrate between double-shift and shift+mask */
#if 1
@@ -353,14 +328,14 @@ MEM_STATIC FORCE_INLINE_ATTR size_t BIT_lookBits(const BIT_DStream_t* bitD, U3
/*! BIT_lookBitsFast() :
* unsafe version; only works if nbBits >= 1 */
-MEM_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t* bitD, U32 nbBits)
+MEM_STATIC BitContainerType BIT_lookBitsFast(const BIT_DStream_t* bitD, U32 nbBits)
{
U32 const regMask = sizeof(bitD->bitContainer)*8 - 1;
assert(nbBits >= 1);
return (bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> (((regMask+1)-nbBits) & regMask);
}
-MEM_STATIC FORCE_INLINE_ATTR void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
+FORCE_INLINE_TEMPLATE void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
{
bitD->bitsConsumed += nbBits;
}
@@ -369,23 +344,38 @@ MEM_STATIC FORCE_INLINE_ATTR void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
* Read (consume) next n bits from local register and update.
* Pay attention to not read more than nbBits contained into local register.
* @return : extracted value. */
-MEM_STATIC FORCE_INLINE_ATTR size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits)
+FORCE_INLINE_TEMPLATE BitContainerType BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits)
{
- size_t const value = BIT_lookBits(bitD, nbBits);
+ BitContainerType const value = BIT_lookBits(bitD, nbBits);
BIT_skipBits(bitD, nbBits);
return value;
}
/*! BIT_readBitsFast() :
- * unsafe version; only works only if nbBits >= 1 */
-MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits)
+ * unsafe version; only works if nbBits >= 1 */
+MEM_STATIC BitContainerType BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits)
{
- size_t const value = BIT_lookBitsFast(bitD, nbBits);
+ BitContainerType const value = BIT_lookBitsFast(bitD, nbBits);
assert(nbBits >= 1);
BIT_skipBits(bitD, nbBits);
return value;
}
+/*! BIT_reloadDStream_internal() :
+ * Simple variant of BIT_reloadDStream(), with two conditions:
+ * 1. bitstream is valid : bitsConsumed <= sizeof(bitD->bitContainer)*8
+ * 2. look window is valid after being shifted down : bitD->ptr >= bitD->start
+ */
+MEM_STATIC BIT_DStream_status BIT_reloadDStream_internal(BIT_DStream_t* bitD)
+{
+ assert(bitD->bitsConsumed <= sizeof(bitD->bitContainer)*8);
+ bitD->ptr -= bitD->bitsConsumed >> 3;
+ assert(bitD->ptr >= bitD->start);
+ bitD->bitsConsumed &= 7;
+ bitD->bitContainer = MEM_readLEST(bitD->ptr);
+ return BIT_DStream_unfinished;
+}
+
/*! BIT_reloadDStreamFast() :
* Similar to BIT_reloadDStream(), but with two differences:
* 1. bitsConsumed <= sizeof(bitD->bitContainer)*8 must hold!
@@ -396,31 +386,35 @@ MEM_STATIC BIT_DStream_status BIT_reloadDStreamFast(BIT_DStream_t* bitD)
{
if (UNLIKELY(bitD->ptr < bitD->limitPtr))
return BIT_DStream_overflow;
- assert(bitD->bitsConsumed <= sizeof(bitD->bitContainer)*8);
- bitD->ptr -= bitD->bitsConsumed >> 3;
- bitD->bitsConsumed &= 7;
- bitD->bitContainer = MEM_readLEST(bitD->ptr);
- return BIT_DStream_unfinished;
+ return BIT_reloadDStream_internal(bitD);
}
/*! BIT_reloadDStream() :
* Refill `bitD` from buffer previously set in BIT_initDStream() .
- * This function is safe, it guarantees it will not read beyond src buffer.
+ * This function is safe, it guarantees it will never read beyond src buffer.
* @return : status of `BIT_DStream_t` internal register.
* when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */
-MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
+FORCE_INLINE_TEMPLATE BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
{
- if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* overflow detected, like end of stream */
+ /* note : once in overflow mode, a bitstream remains in this mode until it's reset */
+ if (UNLIKELY(bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))) {
+ static const BitContainerType zeroFilled = 0;
+ bitD->ptr = (const char*)&zeroFilled; /* aliasing is allowed for char */
+ /* overflow detected, erroneous scenario or end of stream: no update */
return BIT_DStream_overflow;
+ }
+
+ assert(bitD->ptr >= bitD->start);
if (bitD->ptr >= bitD->limitPtr) {
- return BIT_reloadDStreamFast(bitD);
+ return BIT_reloadDStream_internal(bitD);
}
if (bitD->ptr == bitD->start) {
+ /* reached end of bitStream => no update */
if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer;
return BIT_DStream_completed;
}
- /* start < ptr < limitPtr */
+ /* start < ptr < limitPtr => cautious update */
{ U32 nbBytes = bitD->bitsConsumed >> 3;
BIT_DStream_status result = BIT_DStream_unfinished;
if (bitD->ptr - nbBytes < bitD->start) {
@@ -442,5 +436,4 @@ MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream)
return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
}
-
#endif /* BITSTREAM_H_MODULE */
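To make the reverse (LIFO) read order and the new BitContainerType plumbing concrete, here is a rough round-trip sketch. It is not part of the patch: the function name is invented, error handling is elided, and it assumes bitstream.h is included.

    static void bitstream_roundtrip_sketch(void)
    {
        BYTE buffer[16];
        BIT_CStream_t bitC;
        BIT_DStream_t bitD;
        size_t streamSize;

        /* encode forward */
        BIT_initCStream(&bitC, buffer, sizeof(buffer));
        BIT_addBits(&bitC, 5, 3);             /* 3-bit field */
        BIT_addBits(&bitC, 300, 9);           /* 9-bit field */
        BIT_flushBits(&bitC);
        streamSize = BIT_closeCStream(&bitC); /* 0 would signal overflow */

        /* decode backward: fields come out in reverse (LIFO) order */
        BIT_initDStream(&bitD, buffer, streamSize);
        BIT_readBits(&bitD, 9);               /* -> 300 */
        BIT_readBits(&bitD, 3);               /* -> 5 */
        BIT_reloadDStream(&bitD);             /* -> BIT_DStream_completed */
    }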
diff --git a/lib/zstd/common/compiler.h b/lib/zstd/common/compiler.h
index c42d39faf9bd..dc9bd15e174e 100644
--- a/lib/zstd/common/compiler.h
+++ b/lib/zstd/common/compiler.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -11,6 +12,8 @@
#ifndef ZSTD_COMPILER_H
#define ZSTD_COMPILER_H
+#include <linux/types.h>
+
#include "portability_macros.h"
/*-*******************************************************
@@ -41,12 +44,15 @@
*/
#define WIN_CDECL
+/* UNUSED_ATTR tells the compiler it is okay if the function is unused. */
+#define UNUSED_ATTR __attribute__((unused))
+
/*
* FORCE_INLINE_TEMPLATE is used to define C "templates", which take constant
* parameters. They must be inlined for the compiler to eliminate the constant
* branches.
*/
-#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR
+#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR UNUSED_ATTR
/*
* HINT_INLINE is used to help the compiler generate better code. It is *not*
* used for "templates", so it can be tweaked based on the compilers
@@ -61,11 +67,21 @@
#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ >= 4 && __GNUC_MINOR__ >= 8 && __GNUC__ < 5
# define HINT_INLINE static INLINE_KEYWORD
#else
-# define HINT_INLINE static INLINE_KEYWORD FORCE_INLINE_ATTR
+# define HINT_INLINE FORCE_INLINE_TEMPLATE
#endif
-/* UNUSED_ATTR tells the compiler it is okay if the function is unused. */
-#define UNUSED_ATTR __attribute__((unused))
+/* "soft" inline :
+ * The compiler is free to select if it's a good idea to inline or not.
+ * The main objective is to silence compiler warnings
+ * when a defined function is included but not used.
+ *
+ * Note : this macro is prefixed `MEM_` because it used to be provided by `mem.h` unit.
+ * Updating the prefix is probably preferable, but requires a fairly large codemod,
+ * since this name is used everywhere.
+ */
+#ifndef MEM_STATIC /* already defined in Linux Kernel mem.h */
+#define MEM_STATIC static __inline UNUSED_ATTR
+#endif
/* force no inlining */
#define FORCE_NOINLINE static __attribute__((__noinline__))
@@ -86,23 +102,24 @@
# define PREFETCH_L1(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
# define PREFETCH_L2(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 2 /* locality */)
#elif defined(__aarch64__)
-# define PREFETCH_L1(ptr) __asm__ __volatile__("prfm pldl1keep, %0" ::"Q"(*(ptr)))
-# define PREFETCH_L2(ptr) __asm__ __volatile__("prfm pldl2keep, %0" ::"Q"(*(ptr)))
+# define PREFETCH_L1(ptr) do { __asm__ __volatile__("prfm pldl1keep, %0" ::"Q"(*(ptr))); } while (0)
+# define PREFETCH_L2(ptr) do { __asm__ __volatile__("prfm pldl2keep, %0" ::"Q"(*(ptr))); } while (0)
#else
-# define PREFETCH_L1(ptr) (void)(ptr) /* disabled */
-# define PREFETCH_L2(ptr) (void)(ptr) /* disabled */
+# define PREFETCH_L1(ptr) do { (void)(ptr); } while (0) /* disabled */
+# define PREFETCH_L2(ptr) do { (void)(ptr); } while (0) /* disabled */
#endif /* NO_PREFETCH */
#define CACHELINE_SIZE 64
-#define PREFETCH_AREA(p, s) { \
- const char* const _ptr = (const char*)(p); \
- size_t const _size = (size_t)(s); \
- size_t _pos; \
- for (_pos=0; _pos<_size; _pos+=CACHELINE_SIZE) { \
- PREFETCH_L2(_ptr + _pos); \
- } \
-}
+#define PREFETCH_AREA(p, s) \
+ do { \
+ const char* const _ptr = (const char*)(p); \
+ size_t const _size = (size_t)(s); \
+ size_t _pos; \
+ for (_pos=0; _pos<_size; _pos+=CACHELINE_SIZE) { \
+ PREFETCH_L2(_ptr + _pos); \
+ } \
+ } while (0)
/* vectorization
* older GCC (pre gcc-4.3 picked as the cutoff) uses a different syntax,
@@ -126,16 +143,13 @@
#define UNLIKELY(x) (__builtin_expect((x), 0))
#if __has_builtin(__builtin_unreachable) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)))
-# define ZSTD_UNREACHABLE { assert(0), __builtin_unreachable(); }
+# define ZSTD_UNREACHABLE do { assert(0), __builtin_unreachable(); } while (0)
#else
-# define ZSTD_UNREACHABLE { assert(0); }
+# define ZSTD_UNREACHABLE do { assert(0); } while (0)
#endif
/* disable warnings */
-/*Like DYNAMIC_BMI2 but for compile time determination of BMI2 support*/
-
-
/* compile time determination of SIMD support */
/* C-language Attributes are added in C23. */
@@ -158,9 +172,15 @@
#define ZSTD_FALLTHROUGH fallthrough
/*-**************************************************************
-* Alignment check
+* Alignment
*****************************************************************/
+/* @return 1 if @u is a 2^n value, 0 otherwise
+ * useful to check a value is valid for alignment restrictions */
+MEM_STATIC int ZSTD_isPower2(size_t u) {
+ return (u & (u-1)) == 0;
+}
+
/* this test was initially positioned in mem.h,
* but this file is removed (or replaced) for linux kernel
* so it's now hosted in compiler.h,
@@ -175,10 +195,95 @@
#endif /* ZSTD_ALIGNOF */
+#ifndef ZSTD_ALIGNED
+/* C90-compatible alignment macro (GCC/Clang). Adjust for other compilers if needed. */
+#define ZSTD_ALIGNED(a) __attribute__((aligned(a)))
+#endif /* ZSTD_ALIGNED */
+
+
/*-**************************************************************
* Sanitizer
*****************************************************************/
+/*
+ * Zstd relies on pointer overflow in its decompressor.
+ * We add this attribute to functions that rely on pointer overflow.
+ */
+#ifndef ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+# if __has_attribute(no_sanitize)
+# if !defined(__clang__) && defined(__GNUC__) && __GNUC__ < 8
+ /* gcc < 8 only has signed-integer-overflow which triggers on pointer overflow */
+# define ZSTD_ALLOW_POINTER_OVERFLOW_ATTR __attribute__((no_sanitize("signed-integer-overflow")))
+# else
+ /* older versions of clang [3.7, 5.0) will warn that pointer-overflow is ignored. */
+# define ZSTD_ALLOW_POINTER_OVERFLOW_ATTR __attribute__((no_sanitize("pointer-overflow")))
+# endif
+# else
+# define ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+# endif
+#endif
+
+/*
+ * Helper function to perform a wrapped pointer difference without triggering
+ * UBSAN.
+ *
+ * @returns lhs - rhs with wrapping
+ */
+MEM_STATIC
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+ptrdiff_t ZSTD_wrappedPtrDiff(unsigned char const* lhs, unsigned char const* rhs)
+{
+ return lhs - rhs;
+}
+
+/*
+ * Helper function to perform a wrapped pointer add without triggering UBSAN.
+ *
+ * @return ptr + add with wrapping
+ */
+MEM_STATIC
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+unsigned char const* ZSTD_wrappedPtrAdd(unsigned char const* ptr, ptrdiff_t add)
+{
+ return ptr + add;
+}
+
+/*
+ * Helper function to perform a wrapped pointer subtraction without triggering
+ * UBSAN.
+ *
+ * @return ptr - sub with wrapping
+ */
+MEM_STATIC
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+unsigned char const* ZSTD_wrappedPtrSub(unsigned char const* ptr, ptrdiff_t sub)
+{
+ return ptr - sub;
+}
+
+/*
+ * Helper function to add to a pointer that works around C's undefined behavior
+ * of adding 0 to NULL.
+ *
+ * @returns `ptr + add` except it defines `NULL + 0 == NULL`.
+ */
+MEM_STATIC
+unsigned char* ZSTD_maybeNullPtrAdd(unsigned char* ptr, ptrdiff_t add)
+{
+ return add > 0 ? ptr + add : ptr;
+}
+
+/* Issue #3240 reports an ASAN failure on an llvm-mingw build. Out of an
+ * abundance of caution, disable our custom poisoning on mingw. */
+#ifdef __MINGW32__
+#ifndef ZSTD_ASAN_DONT_POISON_WORKSPACE
+#define ZSTD_ASAN_DONT_POISON_WORKSPACE 1
+#endif
+#ifndef ZSTD_MSAN_DONT_POISON_WORKSPACE
+#define ZSTD_MSAN_DONT_POISON_WORKSPACE 1
+#endif
+#endif
+
#endif /* ZSTD_COMPILER_H */
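A short sketch of how the new alignment and pointer helpers are meant to be used. Illustrative only, not part of the patch; the function name is hypothetical and it assumes compiler.h is included.

    static void compiler_helpers_sketch(void)
    {
        unsigned char buf[8];
        unsigned char* nullp = NULL;

        int const ok  = ZSTD_isPower2(64);    /* 1 : acceptable alignment */
        int const bad = ZSTD_isPower2(24);    /* 0 : not a power of two */

        /* NULL + 0 is undefined behaviour in C; the helper defines it as NULL */
        unsigned char* const still_null = ZSTD_maybeNullPtrAdd(nullp, 0);

        /* wrapped pointer arithmetic that UBSAN is told not to flag */
        ptrdiff_t const d = ZSTD_wrappedPtrDiff(buf + 8, buf);      /* 8 */
        unsigned char const* const p = ZSTD_wrappedPtrAdd(buf, 3);  /* buf + 3 */

        (void)ok; (void)bad; (void)still_null; (void)d; (void)p;
    }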
diff --git a/lib/zstd/common/cpu.h b/lib/zstd/common/cpu.h
index 0db7b42407ee..d8319a2bef4c 100644
--- a/lib/zstd/common/cpu.h
+++ b/lib/zstd/common/cpu.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
diff --git a/lib/zstd/common/debug.c b/lib/zstd/common/debug.c
index bb863c9ea616..8eb6aa9a3b20 100644
--- a/lib/zstd/common/debug.c
+++ b/lib/zstd/common/debug.c
@@ -1,7 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/* ******************************************************************
* debug
* Part of FSE library
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
*
* You can contact the author at :
* - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
@@ -21,4 +22,10 @@
#include "debug.h"
+#if (DEBUGLEVEL>=2)
+/* We only use this when DEBUGLEVEL>=2, but we get -Werror=pedantic errors if a
+ * translation unit is empty. So remove this from Linux kernel builds, but
+ * otherwise just leave it in.
+ */
int g_debuglevel = DEBUGLEVEL;
+#endif
diff --git a/lib/zstd/common/debug.h b/lib/zstd/common/debug.h
index 6dd88d1fbd02..c8a10281f112 100644
--- a/lib/zstd/common/debug.h
+++ b/lib/zstd/common/debug.h
@@ -1,7 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/* ******************************************************************
* debug
* Part of FSE library
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
*
* You can contact the author at :
* - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
@@ -33,7 +34,6 @@
#define DEBUG_H_12987983217
-
/* static assert is triggered at compile time, leaving no runtime artefact.
* static assert only works with compile-time constants.
* Also, this variant can only be used inside a function. */
@@ -82,20 +82,27 @@ extern int g_debuglevel; /* the variable is only declared,
It's useful when enabling very verbose levels
on selective conditions (such as position in src) */
-# define RAWLOG(l, ...) { \
- if (l<=g_debuglevel) { \
- ZSTD_DEBUG_PRINT(__VA_ARGS__); \
- } }
-# define DEBUGLOG(l, ...) { \
- if (l<=g_debuglevel) { \
- ZSTD_DEBUG_PRINT(__FILE__ ": " __VA_ARGS__); \
- ZSTD_DEBUG_PRINT(" \n"); \
- } }
+# define RAWLOG(l, ...) \
+ do { \
+ if (l<=g_debuglevel) { \
+ ZSTD_DEBUG_PRINT(__VA_ARGS__); \
+ } \
+ } while (0)
+
+#define STRINGIFY(x) #x
+#define TOSTRING(x) STRINGIFY(x)
+#define LINE_AS_STRING TOSTRING(__LINE__)
+
+# define DEBUGLOG(l, ...) \
+ do { \
+ if (l<=g_debuglevel) { \
+ ZSTD_DEBUG_PRINT(__FILE__ ":" LINE_AS_STRING ": " __VA_ARGS__); \
+ ZSTD_DEBUG_PRINT(" \n"); \
+ } \
+ } while (0)
#else
-# define RAWLOG(l, ...) {} /* disabled */
-# define DEBUGLOG(l, ...) {} /* disabled */
+# define RAWLOG(l, ...) do { } while (0) /* disabled */
+# define DEBUGLOG(l, ...) do { } while (0) /* disabled */
#endif
-
-
#endif /* DEBUG_H_12987983217 */
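The reason RAWLOG()/DEBUGLOG() are rewrapped in do { ... } while (0) is the usual dangling-else hazard; a minimal sketch (hypothetical function, not part of the patch):

    static void debuglog_sketch(int ret)
    {
        if (ret < 0)
            DEBUGLOG(3, "operation failed: %d", ret);
        else
            DEBUGLOG(5, "operation succeeded");
        /* with the old `{ ... }` bodies, the ';' after the first DEBUGLOG ended
         * the if-statement and the `else` had nothing to pair with; the
         * do { ... } while (0) form behaves like a single statement. */
    }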
diff --git a/lib/zstd/common/entropy_common.c b/lib/zstd/common/entropy_common.c
index fef67056f052..6cdd82233fb5 100644
--- a/lib/zstd/common/entropy_common.c
+++ b/lib/zstd/common/entropy_common.c
@@ -1,6 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/* ******************************************************************
* Common functions of New Generation Entropy library
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
*
* You can contact the author at :
* - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
@@ -19,8 +20,8 @@
#include "error_private.h" /* ERR_*, ERROR */
#define FSE_STATIC_LINKING_ONLY /* FSE_MIN_TABLELOG */
#include "fse.h"
-#define HUF_STATIC_LINKING_ONLY /* HUF_TABLELOG_ABSOLUTEMAX */
#include "huf.h"
+#include "bits.h" /* ZSDT_highbit32, ZSTD_countTrailingZeros32 */
/*=== Version ===*/
@@ -38,23 +39,6 @@ const char* HUF_getErrorName(size_t code) { return ERR_getErrorName(code); }
/*-**************************************************************
* FSE NCount encoding-decoding
****************************************************************/
-static U32 FSE_ctz(U32 val)
-{
- assert(val != 0);
- {
-# if (__GNUC__ >= 3) /* GCC Intrinsic */
- return __builtin_ctz(val);
-# else /* Software version */
- U32 count = 0;
- while ((val & 1) == 0) {
- val >>= 1;
- ++count;
- }
- return count;
-# endif
- }
-}
-
FORCE_INLINE_TEMPLATE
size_t FSE_readNCount_body(short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
const void* headerBuffer, size_t hbSize)
@@ -102,7 +86,7 @@ size_t FSE_readNCount_body(short* normalizedCounter, unsigned* maxSVPtr, unsigne
* repeat.
* Avoid UB by setting the high bit to 1.
*/
- int repeats = FSE_ctz(~bitStream | 0x80000000) >> 1;
+ int repeats = ZSTD_countTrailingZeros32(~bitStream | 0x80000000) >> 1;
while (repeats >= 12) {
charnum += 3 * 12;
if (LIKELY(ip <= iend-7)) {
@@ -113,7 +97,7 @@ size_t FSE_readNCount_body(short* normalizedCounter, unsigned* maxSVPtr, unsigne
ip = iend - 4;
}
bitStream = MEM_readLE32(ip) >> bitCount;
- repeats = FSE_ctz(~bitStream | 0x80000000) >> 1;
+ repeats = ZSTD_countTrailingZeros32(~bitStream | 0x80000000) >> 1;
}
charnum += 3 * repeats;
bitStream >>= 2 * repeats;
@@ -178,7 +162,7 @@ size_t FSE_readNCount_body(short* normalizedCounter, unsigned* maxSVPtr, unsigne
* know that threshold > 1.
*/
if (remaining <= 1) break;
- nbBits = BIT_highbit32(remaining) + 1;
+ nbBits = ZSTD_highbit32(remaining) + 1;
threshold = 1 << (nbBits - 1);
}
if (charnum >= maxSV1) break;
@@ -253,7 +237,7 @@ size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
const void* src, size_t srcSize)
{
U32 wksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];
- return HUF_readStats_wksp(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, wksp, sizeof(wksp), /* bmi2 */ 0);
+ return HUF_readStats_wksp(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, wksp, sizeof(wksp), /* flags */ 0);
}
FORCE_INLINE_TEMPLATE size_t
@@ -301,14 +285,14 @@ HUF_readStats_body(BYTE* huffWeight, size_t hwSize, U32* rankStats,
if (weightTotal == 0) return ERROR(corruption_detected);
/* get last non-null symbol weight (implied, total must be 2^n) */
- { U32 const tableLog = BIT_highbit32(weightTotal) + 1;
+ { U32 const tableLog = ZSTD_highbit32(weightTotal) + 1;
if (tableLog > HUF_TABLELOG_MAX) return ERROR(corruption_detected);
*tableLogPtr = tableLog;
/* determine last weight */
{ U32 const total = 1 << tableLog;
U32 const rest = total - weightTotal;
- U32 const verif = 1 << BIT_highbit32(rest);
- U32 const lastWeight = BIT_highbit32(rest) + 1;
+ U32 const verif = 1 << ZSTD_highbit32(rest);
+ U32 const lastWeight = ZSTD_highbit32(rest) + 1;
if (verif != rest) return ERROR(corruption_detected); /* last value must be a clean power of 2 */
huffWeight[oSize] = (BYTE)lastWeight;
rankStats[lastWeight]++;
@@ -345,13 +329,13 @@ size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize, U32* rankStats,
U32* nbSymbolsPtr, U32* tableLogPtr,
const void* src, size_t srcSize,
void* workSpace, size_t wkspSize,
- int bmi2)
+ int flags)
{
#if DYNAMIC_BMI2
- if (bmi2) {
+ if (flags & HUF_flags_bmi2) {
return HUF_readStats_body_bmi2(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize);
}
#endif
- (void)bmi2;
+ (void)flags;
return HUF_readStats_body_default(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize);
}
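A quick worked example of the repeat-count expression used in FSE_readNCount_body above (illustrative only, not part of the patch; the helper name is made up and it assumes bits.h is included):

    static int ncount_repeats_sketch(void)
    {
        U32 const bitStream = 0x0000003F; /* three consecutive '11' fields in the low bits */
        /* each 2-bit field equal to 3 ('11') adds a run of 3 zero-probability
         * symbols; OR-ing with 0x80000000 keeps the ctz argument non-zero.
         * ~bitStream == 0xFFFFFFC0 -> 6 trailing zeros -> 3 repeats. */
        return ZSTD_countTrailingZeros32(~bitStream | 0x80000000) >> 1;  /* 3 */
    }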
diff --git a/lib/zstd/common/error_private.c b/lib/zstd/common/error_private.c
index 6d1135f8c373..6c3dbad838b6 100644
--- a/lib/zstd/common/error_private.c
+++ b/lib/zstd/common/error_private.c
@@ -1,5 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -27,9 +28,11 @@ const char* ERR_getErrorString(ERR_enum code)
case PREFIX(version_unsupported): return "Version not supported";
case PREFIX(frameParameter_unsupported): return "Unsupported frame parameter";
case PREFIX(frameParameter_windowTooLarge): return "Frame requires too much memory for decoding";
- case PREFIX(corruption_detected): return "Corrupted block detected";
+ case PREFIX(corruption_detected): return "Data corruption detected";
case PREFIX(checksum_wrong): return "Restored data doesn't match checksum";
+ case PREFIX(literals_headerWrong): return "Header of Literals' block doesn't respect format specification";
case PREFIX(parameter_unsupported): return "Unsupported parameter";
+ case PREFIX(parameter_combination_unsupported): return "Unsupported combination of parameters";
case PREFIX(parameter_outOfBound): return "Parameter is out of bound";
case PREFIX(init_missing): return "Context should be init first";
case PREFIX(memory_allocation): return "Allocation error : not enough memory";
@@ -38,17 +41,23 @@ const char* ERR_getErrorString(ERR_enum code)
case PREFIX(tableLog_tooLarge): return "tableLog requires too much memory : unsupported";
case PREFIX(maxSymbolValue_tooLarge): return "Unsupported max Symbol Value : too large";
case PREFIX(maxSymbolValue_tooSmall): return "Specified maxSymbolValue is too small";
+ case PREFIX(cannotProduce_uncompressedBlock): return "This mode cannot generate an uncompressed block";
+ case PREFIX(stabilityCondition_notRespected): return "pledged buffer stability condition is not respected";
case PREFIX(dictionary_corrupted): return "Dictionary is corrupted";
case PREFIX(dictionary_wrong): return "Dictionary mismatch";
case PREFIX(dictionaryCreation_failed): return "Cannot create Dictionary from provided samples";
case PREFIX(dstSize_tooSmall): return "Destination buffer is too small";
case PREFIX(srcSize_wrong): return "Src size is incorrect";
case PREFIX(dstBuffer_null): return "Operation on NULL destination buffer";
+ case PREFIX(noForwardProgress_destFull): return "Operation made no progress over multiple calls, due to output buffer being full";
+ case PREFIX(noForwardProgress_inputEmpty): return "Operation made no progress over multiple calls, due to input being empty";
/* following error codes are not stable and may be removed or changed in a future version */
case PREFIX(frameIndex_tooLarge): return "Frame index is too large";
case PREFIX(seekableIO): return "An I/O error occurred when reading/seeking";
case PREFIX(dstBuffer_wrong): return "Destination buffer is wrong";
case PREFIX(srcBuffer_wrong): return "Source buffer is wrong";
+ case PREFIX(sequenceProducer_failed): return "Block-level external sequence producer returned an error code";
+ case PREFIX(externalSequences_invalid): return "External sequences are not valid";
case PREFIX(maxCode):
default: return notErrorCode;
}
diff --git a/lib/zstd/common/error_private.h b/lib/zstd/common/error_private.h
index ca5101e542fa..08ee87b68cca 100644
--- a/lib/zstd/common/error_private.h
+++ b/lib/zstd/common/error_private.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -13,8 +14,6 @@
#ifndef ERROR_H_MODULE
#define ERROR_H_MODULE
-
-
/* ****************************************
* Dependencies
******************************************/
@@ -23,7 +22,6 @@
#include "debug.h"
#include "zstd_deps.h" /* size_t */
-
/* ****************************************
* Compiler-specific
******************************************/
@@ -49,8 +47,13 @@ ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); }
ERR_STATIC ERR_enum ERR_getErrorCode(size_t code) { if (!ERR_isError(code)) return (ERR_enum)0; return (ERR_enum) (0-code); }
/* check and forward error code */
-#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e
-#define CHECK_F(f) { CHECK_V_F(_var_err__, f); }
+#define CHECK_V_F(e, f) \
+ size_t const e = f; \
+ do { \
+ if (ERR_isError(e)) \
+ return e; \
+ } while (0)
+#define CHECK_F(f) do { CHECK_V_F(_var_err__, f); } while (0)
/*-****************************************
@@ -84,10 +87,12 @@ void _force_has_format_string(const char *format, ...) {
* We want to force this function invocation to be syntactically correct, but
* we don't want to force runtime evaluation of its arguments.
*/
-#define _FORCE_HAS_FORMAT_STRING(...) \
- if (0) { \
- _force_has_format_string(__VA_ARGS__); \
- }
+#define _FORCE_HAS_FORMAT_STRING(...) \
+ do { \
+ if (0) { \
+ _force_has_format_string(__VA_ARGS__); \
+ } \
+ } while (0)
#define ERR_QUOTE(str) #str
@@ -98,48 +103,49 @@ void _force_has_format_string(const char *format, ...) {
* In order to do that (particularly, printing the conditional that failed),
* this can't just wrap RETURN_ERROR().
*/
-#define RETURN_ERROR_IF(cond, err, ...) \
- if (cond) { \
- RAWLOG(3, "%s:%d: ERROR!: check %s failed, returning %s", \
- __FILE__, __LINE__, ERR_QUOTE(cond), ERR_QUOTE(ERROR(err))); \
- _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
- RAWLOG(3, ": " __VA_ARGS__); \
- RAWLOG(3, "\n"); \
- return ERROR(err); \
- }
+#define RETURN_ERROR_IF(cond, err, ...) \
+ do { \
+ if (cond) { \
+ RAWLOG(3, "%s:%d: ERROR!: check %s failed, returning %s", \
+ __FILE__, __LINE__, ERR_QUOTE(cond), ERR_QUOTE(ERROR(err))); \
+ _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
+ RAWLOG(3, ": " __VA_ARGS__); \
+ RAWLOG(3, "\n"); \
+ return ERROR(err); \
+ } \
+ } while (0)
/*
* Unconditionally return the specified error.
*
* In debug modes, prints additional information.
*/
-#define RETURN_ERROR(err, ...) \
- do { \
- RAWLOG(3, "%s:%d: ERROR!: unconditional check failed, returning %s", \
- __FILE__, __LINE__, ERR_QUOTE(ERROR(err))); \
- _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
- RAWLOG(3, ": " __VA_ARGS__); \
- RAWLOG(3, "\n"); \
- return ERROR(err); \
- } while(0);
+#define RETURN_ERROR(err, ...) \
+ do { \
+ RAWLOG(3, "%s:%d: ERROR!: unconditional check failed, returning %s", \
+ __FILE__, __LINE__, ERR_QUOTE(ERROR(err))); \
+ _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
+ RAWLOG(3, ": " __VA_ARGS__); \
+ RAWLOG(3, "\n"); \
+ return ERROR(err); \
+ } while(0)
/*
* If the provided expression evaluates to an error code, returns that error code.
*
* In debug modes, prints additional information.
*/
-#define FORWARD_IF_ERROR(err, ...) \
- do { \
- size_t const err_code = (err); \
- if (ERR_isError(err_code)) { \
- RAWLOG(3, "%s:%d: ERROR!: forwarding error in %s: %s", \
- __FILE__, __LINE__, ERR_QUOTE(err), ERR_getErrorName(err_code)); \
- _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
- RAWLOG(3, ": " __VA_ARGS__); \
- RAWLOG(3, "\n"); \
- return err_code; \
- } \
- } while(0);
-
+#define FORWARD_IF_ERROR(err, ...) \
+ do { \
+ size_t const err_code = (err); \
+ if (ERR_isError(err_code)) { \
+ RAWLOG(3, "%s:%d: ERROR!: forwarding error in %s: %s", \
+ __FILE__, __LINE__, ERR_QUOTE(err), ERR_getErrorName(err_code)); \
+ _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
+ RAWLOG(3, ": " __VA_ARGS__); \
+ RAWLOG(3, "\n"); \
+ return err_code; \
+ } \
+ } while(0)
#endif /* ERROR_H_MODULE */
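For context, the error helpers are used as below; the trailing semicolon after each macro is what the do { ... } while (0) rewrap makes safe inside if/else. Hypothetical function, not part of the patch; it assumes error_private.h and zstd_deps.h are included.

    static size_t copy_checked(void* dst, size_t dstCapacity,
                               const void* src, size_t srcSize)
    {
        RETURN_ERROR_IF(srcSize > dstCapacity, dstSize_tooSmall,
                        "need %u bytes, have %u", (unsigned)srcSize, (unsigned)dstCapacity);
        ZSTD_memcpy(dst, src, srcSize);
        return srcSize;
    }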
diff --git a/lib/zstd/common/fse.h b/lib/zstd/common/fse.h
index 4507043b2287..b36ce7a2a8c3 100644
--- a/lib/zstd/common/fse.h
+++ b/lib/zstd/common/fse.h
@@ -1,7 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/* ******************************************************************
* FSE : Finite State Entropy codec
* Public Prototypes declaration
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
*
* You can contact the author at :
* - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
@@ -11,8 +12,6 @@
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
****************************************************************** */
-
-
#ifndef FSE_H
#define FSE_H
@@ -22,7 +21,6 @@
******************************************/
#include "zstd_deps.h" /* size_t, ptrdiff_t */
-
/*-*****************************************
* FSE_PUBLIC_API : control library symbols visibility
******************************************/
@@ -50,34 +48,6 @@
FSE_PUBLIC_API unsigned FSE_versionNumber(void); /*< library version number; to be used when checking dll version */
-/*-****************************************
-* FSE simple functions
-******************************************/
-/*! FSE_compress() :
- Compress content of buffer 'src', of size 'srcSize', into destination buffer 'dst'.
- 'dst' buffer must be already allocated. Compression runs faster is dstCapacity >= FSE_compressBound(srcSize).
- @return : size of compressed data (<= dstCapacity).
- Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
- if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression instead.
- if FSE_isError(return), compression failed (more details using FSE_getErrorName())
-*/
-FSE_PUBLIC_API size_t FSE_compress(void* dst, size_t dstCapacity,
- const void* src, size_t srcSize);
-
-/*! FSE_decompress():
- Decompress FSE data from buffer 'cSrc', of size 'cSrcSize',
- into already allocated destination buffer 'dst', of size 'dstCapacity'.
- @return : size of regenerated data (<= maxDstSize),
- or an error code, which can be tested using FSE_isError() .
-
- ** Important ** : FSE_decompress() does not decompress non-compressible nor RLE data !!!
- Why ? : making this distinction requires a header.
- Header management is intentionally delegated to the user layer, which can better manage special cases.
-*/
-FSE_PUBLIC_API size_t FSE_decompress(void* dst, size_t dstCapacity,
- const void* cSrc, size_t cSrcSize);
-
-
/*-*****************************************
* Tool functions
******************************************/
@@ -89,20 +59,6 @@ FSE_PUBLIC_API const char* FSE_getErrorName(size_t code); /* provides error co
/*-*****************************************
-* FSE advanced functions
-******************************************/
-/*! FSE_compress2() :
- Same as FSE_compress(), but allows the selection of 'maxSymbolValue' and 'tableLog'
- Both parameters can be defined as '0' to mean : use default value
- @return : size of compressed data
- Special values : if return == 0, srcData is not compressible => Nothing is stored within cSrc !!!
- if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression.
- if FSE_isError(return), it's an error code.
-*/
-FSE_PUBLIC_API size_t FSE_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
-
-
-/*-*****************************************
* FSE detailed API
******************************************/
/*!
@@ -161,8 +117,6 @@ FSE_PUBLIC_API size_t FSE_writeNCount (void* buffer, size_t bufferSize,
/*! Constructor and Destructor of FSE_CTable.
Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */
typedef unsigned FSE_CTable; /* don't allocate that. It's only meant to be more restrictive than void* */
-FSE_PUBLIC_API FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog);
-FSE_PUBLIC_API void FSE_freeCTable (FSE_CTable* ct);
/*! FSE_buildCTable():
Builds `ct`, which must be already allocated, using FSE_createCTable().
@@ -238,23 +192,7 @@ FSE_PUBLIC_API size_t FSE_readNCount_bmi2(short* normalizedCounter,
unsigned* maxSymbolValuePtr, unsigned* tableLogPtr,
const void* rBuffer, size_t rBuffSize, int bmi2);
-/*! Constructor and Destructor of FSE_DTable.
- Note that its size depends on 'tableLog' */
typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */
-FSE_PUBLIC_API FSE_DTable* FSE_createDTable(unsigned tableLog);
-FSE_PUBLIC_API void FSE_freeDTable(FSE_DTable* dt);
-
-/*! FSE_buildDTable():
- Builds 'dt', which must be already allocated, using FSE_createDTable().
- return : 0, or an errorCode, which can be tested using FSE_isError() */
-FSE_PUBLIC_API size_t FSE_buildDTable (FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
-
-/*! FSE_decompress_usingDTable():
- Decompress compressed source `cSrc` of size `cSrcSize` using `dt`
- into `dst` which must be already allocated.
- @return : size of regenerated data (necessarily <= `dstCapacity`),
- or an errorCode, which can be tested using FSE_isError() */
-FSE_PUBLIC_API size_t FSE_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt);
/*!
Tutorial :
@@ -286,13 +224,11 @@ If there is an error, the function will return an error code, which can be teste
#endif /* FSE_H */
+
#if !defined(FSE_H_FSE_STATIC_LINKING_ONLY)
#define FSE_H_FSE_STATIC_LINKING_ONLY
-
-/* *** Dependency *** */
#include "bitstream.h"
-
/* *****************************************
* Static allocation
*******************************************/
@@ -317,16 +253,6 @@ If there is an error, the function will return an error code, which can be teste
unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus);
/*< same as FSE_optimalTableLog(), which used `minus==2` */
-/* FSE_compress_wksp() :
- * Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`).
- * FSE_COMPRESS_WKSP_SIZE_U32() provides the minimum size required for `workSpace` as a table of FSE_CTable.
- */
-#define FSE_COMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) ( FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) + ((maxTableLog > 12) ? (1 << (maxTableLog - 2)) : 1024) )
-size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
-
-size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits);
-/*< build a fake FSE_CTable, designed for a flat distribution, where each symbol uses nbBits */
-
size_t FSE_buildCTable_rle (FSE_CTable* ct, unsigned char symbolValue);
/*< build a fake FSE_CTable, designed to compress always the same symbolValue */
@@ -344,19 +270,11 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsi
FSE_PUBLIC_API size_t FSE_buildDTable_wksp(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
/*< Same as FSE_buildDTable(), using an externally allocated `workspace` produced with `FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxSymbolValue)` */
-size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits);
-/*< build a fake FSE_DTable, designed to read a flat distribution where each symbol uses nbBits */
-
-size_t FSE_buildDTable_rle (FSE_DTable* dt, unsigned char symbolValue);
-/*< build a fake FSE_DTable, designed to always generate the same symbolValue */
-
-#define FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) (FSE_DTABLE_SIZE_U32(maxTableLog) + FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) + (FSE_MAX_SYMBOL_VALUE + 1) / 2 + 1)
+#define FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) (FSE_DTABLE_SIZE_U32(maxTableLog) + 1 + FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) + (FSE_MAX_SYMBOL_VALUE + 1) / 2 + 1)
#define FSE_DECOMPRESS_WKSP_SIZE(maxTableLog, maxSymbolValue) (FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(unsigned))
-size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize);
-/*< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DECOMPRESS_WKSP_SIZE_U32(maxLog, maxSymbolValue)` */
-
size_t FSE_decompress_wksp_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize, int bmi2);
-/*< Same as FSE_decompress_wksp() but with dynamic BMI2 support. Pass 1 if your CPU supports BMI2 or 0 if it doesn't. */
+/*< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DECOMPRESS_WKSP_SIZE_U32(maxLog, maxSymbolValue)`.
+ * Set bmi2 to 1 if your CPU supports BMI2 or 0 if it doesn't */
typedef enum {
FSE_repeat_none, /*< Cannot use the previous table */
@@ -539,20 +457,20 @@ MEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, un
FSE_symbolCompressionTransform const symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
const U16* const stateTable = (const U16*)(statePtr->stateTable);
U32 const nbBitsOut = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16);
- BIT_addBits(bitC, statePtr->value, nbBitsOut);
+ BIT_addBits(bitC, (BitContainerType)statePtr->value, nbBitsOut);
statePtr->value = stateTable[ (statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
}
MEM_STATIC void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* statePtr)
{
- BIT_addBits(bitC, statePtr->value, statePtr->stateLog);
+ BIT_addBits(bitC, (BitContainerType)statePtr->value, statePtr->stateLog);
BIT_flushBits(bitC);
}
/* FSE_getMaxNbBits() :
* Approximate maximum cost of a symbol, in bits.
- * Fractional get rounded up (i.e : a symbol with a normalized frequency of 3 gives the same result as a frequency of 2)
+ * Fractional get rounded up (i.e. a symbol with a normalized frequency of 3 gives the same result as a frequency of 2)
* note 1 : assume symbolValue is valid (<= maxSymbolValue)
* note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */
MEM_STATIC U32 FSE_getMaxNbBits(const void* symbolTTPtr, U32 symbolValue)
@@ -705,7 +623,4 @@ MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)
#define FSE_TABLESTEP(tableSize) (((tableSize)>>1) + ((tableSize)>>3) + 3)
-
#endif /* FSE_STATIC_LINKING_ONLY */
-
-
diff --git a/lib/zstd/common/fse_decompress.c b/lib/zstd/common/fse_decompress.c
index 8dcb8ca39767..15081d8dc607 100644
--- a/lib/zstd/common/fse_decompress.c
+++ b/lib/zstd/common/fse_decompress.c
@@ -1,6 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/* ******************************************************************
* FSE : Finite State Entropy decoder
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
*
* You can contact the author at :
* - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
@@ -22,8 +23,8 @@
#define FSE_STATIC_LINKING_ONLY
#include "fse.h"
#include "error_private.h"
-#define ZSTD_DEPS_NEED_MALLOC
-#include "zstd_deps.h"
+#include "zstd_deps.h" /* ZSTD_memcpy */
+#include "bits.h" /* ZSTD_highbit32 */
/* **************************************************************
@@ -55,19 +56,6 @@
#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
-
-/* Function templates */
-FSE_DTable* FSE_createDTable (unsigned tableLog)
-{
- if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX;
- return (FSE_DTable*)ZSTD_malloc( FSE_DTABLE_SIZE_U32(tableLog) * sizeof (U32) );
-}
-
-void FSE_freeDTable (FSE_DTable* dt)
-{
- ZSTD_free(dt);
-}
-
static size_t FSE_buildDTable_internal(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
{
void* const tdPtr = dt+1; /* because *dt is unsigned, 32-bits aligned on 32-bits */
@@ -96,7 +84,7 @@ static size_t FSE_buildDTable_internal(FSE_DTable* dt, const short* normalizedCo
symbolNext[s] = 1;
} else {
if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
- symbolNext[s] = normalizedCounter[s];
+ symbolNext[s] = (U16)normalizedCounter[s];
} } }
ZSTD_memcpy(dt, &DTableH, sizeof(DTableH));
}
@@ -111,8 +99,7 @@ static size_t FSE_buildDTable_internal(FSE_DTable* dt, const short* normalizedCo
* all symbols have counts <= 8. We ensure we have 8 bytes at the end of
* our buffer to handle the over-write.
*/
- {
- U64 const add = 0x0101010101010101ull;
+ { U64 const add = 0x0101010101010101ull;
size_t pos = 0;
U64 sv = 0;
U32 s;
@@ -123,14 +110,13 @@ static size_t FSE_buildDTable_internal(FSE_DTable* dt, const short* normalizedCo
for (i = 8; i < n; i += 8) {
MEM_write64(spread + pos + i, sv);
}
- pos += n;
- }
- }
+ pos += (size_t)n;
+ } }
/* Now we spread those positions across the table.
- * The benefit of doing it in two stages is that we avoid the the
+ * The benefit of doing it in two stages is that we avoid the
* variable size inner loop, which caused lots of branch misses.
* Now we can run through all the positions without any branch misses.
- * We unroll the loop twice, since that is what emperically worked best.
+ * We unroll the loop twice, since that is what empirically worked best.
*/
{
size_t position = 0;
@@ -166,7 +152,7 @@ static size_t FSE_buildDTable_internal(FSE_DTable* dt, const short* normalizedCo
for (u=0; u<tableSize; u++) {
FSE_FUNCTION_TYPE const symbol = (FSE_FUNCTION_TYPE)(tableDecode[u].symbol);
U32 const nextState = symbolNext[symbol]++;
- tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32(nextState) );
+ tableDecode[u].nbBits = (BYTE) (tableLog - ZSTD_highbit32(nextState) );
tableDecode[u].newState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
} }
@@ -184,49 +170,6 @@ size_t FSE_buildDTable_wksp(FSE_DTable* dt, const short* normalizedCounter, unsi
/*-*******************************************************
* Decompression (Byte symbols)
*********************************************************/
-size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue)
-{
- void* ptr = dt;
- FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
- void* dPtr = dt + 1;
- FSE_decode_t* const cell = (FSE_decode_t*)dPtr;
-
- DTableH->tableLog = 0;
- DTableH->fastMode = 0;
-
- cell->newState = 0;
- cell->symbol = symbolValue;
- cell->nbBits = 0;
-
- return 0;
-}
-
-
-size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)
-{
- void* ptr = dt;
- FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
- void* dPtr = dt + 1;
- FSE_decode_t* const dinfo = (FSE_decode_t*)dPtr;
- const unsigned tableSize = 1 << nbBits;
- const unsigned tableMask = tableSize - 1;
- const unsigned maxSV1 = tableMask+1;
- unsigned s;
-
- /* Sanity checks */
- if (nbBits < 1) return ERROR(GENERIC); /* min size */
-
- /* Build Decoding Table */
- DTableH->tableLog = (U16)nbBits;
- DTableH->fastMode = 1;
- for (s=0; s<maxSV1; s++) {
- dinfo[s].newState = 0;
- dinfo[s].symbol = (BYTE)s;
- dinfo[s].nbBits = (BYTE)nbBits;
- }
-
- return 0;
-}
FORCE_INLINE_TEMPLATE size_t FSE_decompress_usingDTable_generic(
void* dst, size_t maxDstSize,
@@ -248,6 +191,8 @@ FORCE_INLINE_TEMPLATE size_t FSE_decompress_usingDTable_generic(
FSE_initDState(&state1, &bitD, dt);
FSE_initDState(&state2, &bitD, dt);
+ RETURN_ERROR_IF(BIT_reloadDStream(&bitD)==BIT_DStream_overflow, corruption_detected, "");
+
#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)
/* 4 symbols per loop */
@@ -287,32 +232,12 @@ FORCE_INLINE_TEMPLATE size_t FSE_decompress_usingDTable_generic(
break;
} }
- return op-ostart;
-}
-
-
-size_t FSE_decompress_usingDTable(void* dst, size_t originalSize,
- const void* cSrc, size_t cSrcSize,
- const FSE_DTable* dt)
-{
- const void* ptr = dt;
- const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr;
- const U32 fastMode = DTableH->fastMode;
-
- /* select fast mode (static) */
- if (fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
- return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
-}
-
-
-size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
-{
- return FSE_decompress_wksp_bmi2(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, /* bmi2 */ 0);
+ assert(op >= ostart);
+ return (size_t)(op-ostart);
}
typedef struct {
short ncount[FSE_MAX_SYMBOL_VALUE + 1];
- FSE_DTable dtable[]; /* Dynamically sized */
} FSE_DecompressWksp;
@@ -327,13 +252,18 @@ FORCE_INLINE_TEMPLATE size_t FSE_decompress_wksp_body(
unsigned tableLog;
unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
FSE_DecompressWksp* const wksp = (FSE_DecompressWksp*)workSpace;
+ size_t const dtablePos = sizeof(FSE_DecompressWksp) / sizeof(FSE_DTable);
+ FSE_DTable* const dtable = (FSE_DTable*)workSpace + dtablePos;
- DEBUG_STATIC_ASSERT((FSE_MAX_SYMBOL_VALUE + 1) % 2 == 0);
+ FSE_STATIC_ASSERT((FSE_MAX_SYMBOL_VALUE + 1) % 2 == 0);
if (wkspSize < sizeof(*wksp)) return ERROR(GENERIC);
+ /* correct offset to dtable depends on this property */
+ FSE_STATIC_ASSERT(sizeof(FSE_DecompressWksp) % sizeof(FSE_DTable) == 0);
+
/* normal FSE decoding mode */
- {
- size_t const NCountLength = FSE_readNCount_bmi2(wksp->ncount, &maxSymbolValue, &tableLog, istart, cSrcSize, bmi2);
+ { size_t const NCountLength =
+ FSE_readNCount_bmi2(wksp->ncount, &maxSymbolValue, &tableLog, istart, cSrcSize, bmi2);
if (FSE_isError(NCountLength)) return NCountLength;
if (tableLog > maxLog) return ERROR(tableLog_tooLarge);
assert(NCountLength <= cSrcSize);
@@ -342,19 +272,20 @@ FORCE_INLINE_TEMPLATE size_t FSE_decompress_wksp_body(
}
if (FSE_DECOMPRESS_WKSP_SIZE(tableLog, maxSymbolValue) > wkspSize) return ERROR(tableLog_tooLarge);
- workSpace = wksp->dtable + FSE_DTABLE_SIZE_U32(tableLog);
+ assert(sizeof(*wksp) + FSE_DTABLE_SIZE(tableLog) <= wkspSize);
+ workSpace = (BYTE*)workSpace + sizeof(*wksp) + FSE_DTABLE_SIZE(tableLog);
wkspSize -= sizeof(*wksp) + FSE_DTABLE_SIZE(tableLog);
- CHECK_F( FSE_buildDTable_internal(wksp->dtable, wksp->ncount, maxSymbolValue, tableLog, workSpace, wkspSize) );
+ CHECK_F( FSE_buildDTable_internal(dtable, wksp->ncount, maxSymbolValue, tableLog, workSpace, wkspSize) );
{
- const void* ptr = wksp->dtable;
+ const void* ptr = dtable;
const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr;
const U32 fastMode = DTableH->fastMode;
/* select fast mode (static) */
- if (fastMode) return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, wksp->dtable, 1);
- return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, wksp->dtable, 0);
+ if (fastMode) return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, dtable, 1);
+ return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, dtable, 0);
}
}
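
The hunk above drops the flexible-array dtable[] member and instead carves the DTable out of a flat workspace at a fixed offset. A minimal sketch of the resulting layout, using only names visible in this hunk (widths illustrative):

    /* Flat workspace layout set up above (widths illustrative):
     *
     *   workSpace
     *   +-- FSE_DecompressWksp: ncount[FSE_MAX_SYMBOL_VALUE + 1]
     *   +-- FSE_DTable:         FSE_DTABLE_SIZE(tableLog) bytes
     *   +-- remaining bytes:    scratch for FSE_buildDTable_internal()
     *
     * dtable = (FSE_DTable*)workSpace + sizeof(FSE_DecompressWksp)/sizeof(FSE_DTable)
     * is exact only because the static assert above guarantees the header size
     * is a multiple of sizeof(FSE_DTable).
     */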
@@ -382,9 +313,4 @@ size_t FSE_decompress_wksp_bmi2(void* dst, size_t dstCapacity, const void* cSrc,
return FSE_decompress_wksp_body_default(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize);
}
-
-typedef FSE_DTable DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];
-
-
-
#endif /* FSE_COMMONDEFS_ONLY */
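
With FSE_decompress_usingDTable() and FSE_decompress_wksp() removed, callers are expected to go through FSE_decompress_wksp_bmi2() directly. A hedged usage sketch; the wrapper name is hypothetical and the sizing macro is assumed from the unchanged part of fse.h:

    #define FSE_STATIC_LINKING_ONLY
    #include "fse.h"

    /* Hypothetical wrapper: workSpace must provide at least
     * FSE_DECOMPRESS_WKSP_SIZE(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE) bytes. */
    static size_t fse_decode_block(void *dst, size_t dstCapacity,
                                   const void *src, size_t srcSize,
                                   void *workSpace, size_t wkspSize)
    {
        return FSE_decompress_wksp_bmi2(dst, dstCapacity, src, srcSize,
                                        FSE_MAX_TABLELOG, workSpace, wkspSize,
                                        /* bmi2 */ 0);
    }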
diff --git a/lib/zstd/common/huf.h b/lib/zstd/common/huf.h
index 5042ff870308..49736dcd8f49 100644
--- a/lib/zstd/common/huf.h
+++ b/lib/zstd/common/huf.h
@@ -1,7 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/* ******************************************************************
* huff0 huffman codec,
* part of Finite State Entropy library
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
*
* You can contact the author at :
* - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
@@ -12,105 +13,26 @@
* You may select, at your option, one of the above-listed licenses.
****************************************************************** */
-
#ifndef HUF_H_298734234
#define HUF_H_298734234
/* *** Dependencies *** */
#include "zstd_deps.h" /* size_t */
-
-
-/* *** library symbols visibility *** */
-/* Note : when linking with -fvisibility=hidden on gcc, or by default on Visual,
- * HUF symbols remain "private" (internal symbols for library only).
- * Set macro FSE_DLL_EXPORT to 1 if you want HUF symbols visible on DLL interface */
-#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4)
-# define HUF_PUBLIC_API __attribute__ ((visibility ("default")))
-#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) /* Visual expected */
-# define HUF_PUBLIC_API __declspec(dllexport)
-#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1)
-# define HUF_PUBLIC_API __declspec(dllimport) /* not required, just to generate faster code (saves a function pointer load from IAT and an indirect jump) */
-#else
-# define HUF_PUBLIC_API
-#endif
-
-
-/* ========================== */
-/* *** simple functions *** */
-/* ========================== */
-
-/* HUF_compress() :
- * Compress content from buffer 'src', of size 'srcSize', into buffer 'dst'.
- * 'dst' buffer must be already allocated.
- * Compression runs faster if `dstCapacity` >= HUF_compressBound(srcSize).
- * `srcSize` must be <= `HUF_BLOCKSIZE_MAX` == 128 KB.
- * @return : size of compressed data (<= `dstCapacity`).
- * Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
- * if HUF_isError(return), compression failed (more details using HUF_getErrorName())
- */
-HUF_PUBLIC_API size_t HUF_compress(void* dst, size_t dstCapacity,
- const void* src, size_t srcSize);
-
-/* HUF_decompress() :
- * Decompress HUF data from buffer 'cSrc', of size 'cSrcSize',
- * into already allocated buffer 'dst', of minimum size 'dstSize'.
- * `originalSize` : **must** be the ***exact*** size of original (uncompressed) data.
- * Note : in contrast with FSE, HUF_decompress can regenerate
- * RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data,
- * because it knows size to regenerate (originalSize).
- * @return : size of regenerated data (== originalSize),
- * or an error code, which can be tested using HUF_isError()
- */
-HUF_PUBLIC_API size_t HUF_decompress(void* dst, size_t originalSize,
- const void* cSrc, size_t cSrcSize);
-
+#include "mem.h" /* U32 */
+#define FSE_STATIC_LINKING_ONLY
+#include "fse.h"
/* *** Tool functions *** */
-#define HUF_BLOCKSIZE_MAX (128 * 1024) /*< maximum input size for a single block compressed with HUF_compress */
-HUF_PUBLIC_API size_t HUF_compressBound(size_t size); /*< maximum compressed size (worst case) */
+#define HUF_BLOCKSIZE_MAX (128 * 1024) /*< maximum input size for a single block compressed with HUF_compress */
+size_t HUF_compressBound(size_t size); /*< maximum compressed size (worst case) */
/* Error Management */
-HUF_PUBLIC_API unsigned HUF_isError(size_t code); /*< tells if a return value is an error code */
-HUF_PUBLIC_API const char* HUF_getErrorName(size_t code); /*< provides error code string (useful for debugging) */
+unsigned HUF_isError(size_t code); /*< tells if a return value is an error code */
+const char* HUF_getErrorName(size_t code); /*< provides error code string (useful for debugging) */
-/* *** Advanced function *** */
-
-/* HUF_compress2() :
- * Same as HUF_compress(), but offers control over `maxSymbolValue` and `tableLog`.
- * `maxSymbolValue` must be <= HUF_SYMBOLVALUE_MAX .
- * `tableLog` must be `<= HUF_TABLELOG_MAX` . */
-HUF_PUBLIC_API size_t HUF_compress2 (void* dst, size_t dstCapacity,
- const void* src, size_t srcSize,
- unsigned maxSymbolValue, unsigned tableLog);
-
-/* HUF_compress4X_wksp() :
- * Same as HUF_compress2(), but uses externally allocated `workSpace`.
- * `workspace` must be at least as large as HUF_WORKSPACE_SIZE */
#define HUF_WORKSPACE_SIZE ((8 << 10) + 512 /* sorting scratch space */)
#define HUF_WORKSPACE_SIZE_U64 (HUF_WORKSPACE_SIZE / sizeof(U64))
-HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity,
- const void* src, size_t srcSize,
- unsigned maxSymbolValue, unsigned tableLog,
- void* workSpace, size_t wkspSize);
-
-#endif /* HUF_H_298734234 */
-
-/* ******************************************************************
- * WARNING !!
- * The following section contains advanced and experimental definitions
- * which shall never be used in the context of a dynamic library,
- * because they are not guaranteed to remain stable in the future.
- * Only consider them in association with static linking.
- * *****************************************************************/
-#if !defined(HUF_H_HUF_STATIC_LINKING_ONLY)
-#define HUF_H_HUF_STATIC_LINKING_ONLY
-
-/* *** Dependencies *** */
-#include "mem.h" /* U32 */
-#define FSE_STATIC_LINKING_ONLY
-#include "fse.h"
-
/* *** Constants *** */
#define HUF_TABLELOG_MAX 12 /* max runtime value of tableLog (due to static allocation); can be modified up to HUF_TABLELOG_ABSOLUTEMAX */
@@ -151,25 +73,49 @@ typedef U32 HUF_DTable;
/* ****************************************
* Advanced decompression functions
******************************************/
-size_t HUF_decompress4X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /*< single-symbol decoder */
-#ifndef HUF_FORCE_DECOMPRESS_X1
-size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /*< double-symbols decoder */
-#endif
-size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /*< decodes RLE and uncompressed */
-size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /*< considers RLE and uncompressed as errors */
-size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /*< considers RLE and uncompressed as errors */
-size_t HUF_decompress4X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /*< single-symbol decoder */
-size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /*< single-symbol decoder */
-#ifndef HUF_FORCE_DECOMPRESS_X1
-size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /*< double-symbols decoder */
-size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /*< double-symbols decoder */
-#endif
+/*
+ * Huffman flags bitset.
+ * For all flags, 0 is the default value.
+ */
+typedef enum {
+ /*
+ * If compiled with DYNAMIC_BMI2: Set flag only if the CPU supports BMI2 at runtime.
+ * Otherwise: Ignored.
+ */
+ HUF_flags_bmi2 = (1 << 0),
+ /*
+ * If set: Test possible table depths to find the one that produces the smallest header + encoded size.
+ * If unset: Use heuristic to find the table depth.
+ */
+ HUF_flags_optimalDepth = (1 << 1),
+ /*
+ * If set: If the previous table can encode the input, always reuse the previous table.
+ * If unset: If the previous table can encode the input, reuse the previous table if it results in a smaller output.
+ */
+ HUF_flags_preferRepeat = (1 << 2),
+ /*
+ * If set: Sample the input and check if the sample is uncompressible, if it is then don't attempt to compress.
+ * If unset: Always histogram the entire input.
+ */
+ HUF_flags_suspectUncompressible = (1 << 3),
+ /*
+ * If set: Don't use assembly implementations
+ * If unset: Allow using assembly implementations
+ */
+ HUF_flags_disableAsm = (1 << 4),
+ /*
+ * If set: Don't use the fast decoding loop, always use the fallback decoding loop.
+ * If unset: Use the fast decoding loop when possible.
+ */
+ HUF_flags_disableFast = (1 << 5)
+} HUF_flags_e;
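+
+/* The bitset above replaces the separate bmi2 / preferRepeat /
+ * suspectUncompressible arguments, so call sites OR the relevant bits
+ * together. A small sketch (the policy inputs are hypothetical):
+ *
+ *   static int huf_make_flags(int cpu_has_bmi2, int prefer_repeat)
+ *   {
+ *       int flags = 0;
+ *       if (cpu_has_bmi2)  flags |= HUF_flags_bmi2;
+ *       if (prefer_repeat) flags |= HUF_flags_preferRepeat;
+ *       return flags | HUF_flags_suspectUncompressible;
+ *   }
+ */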
/* ****************************************
* HUF detailed API
* ****************************************/
+#define HUF_OPTIMAL_DEPTH_THRESHOLD ZSTD_btultra
/*! HUF_compress() does the following:
* 1. count symbol occurrence from source[] into table count[] using FSE_count() (exposed within "fse.h")
@@ -182,12 +128,12 @@ size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
* For example, it's possible to compress several blocks using the same 'CTable',
* or to save and regenerate 'CTable' using external methods.
*/
-unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
-size_t HUF_buildCTable (HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits); /* @return : maxNbBits; CTable and count can overlap. In which case, CTable will overwrite count content */
-size_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog);
+unsigned HUF_minTableLog(unsigned symbolCardinality);
+unsigned HUF_cardinality(const unsigned* count, unsigned maxSymbolValue);
+unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, void* workSpace,
+ size_t wkspSize, HUF_CElt* table, const unsigned* count, int flags); /* table is used as scratch space for building and testing tables, not a return value */
size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog, void* workspace, size_t workspaceSize);
-size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
-size_t HUF_compress4X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2);
+size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags);
size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue);
int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue);
@@ -196,6 +142,7 @@ typedef enum {
HUF_repeat_check, /*< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */
HUF_repeat_valid /*< Can use the previous table and it is assumed to be valid */
} HUF_repeat;
+
/* HUF_compress4X_repeat() :
* Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
* If it uses hufTable it does not modify hufTable or repeat.
@@ -206,13 +153,13 @@ size_t HUF_compress4X_repeat(void* dst, size_t dstSize,
const void* src, size_t srcSize,
unsigned maxSymbolValue, unsigned tableLog,
void* workSpace, size_t wkspSize, /*< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
- HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2, unsigned suspectUncompressible);
+ HUF_CElt* hufTable, HUF_repeat* repeat, int flags);
/* HUF_buildCTable_wksp() :
* Same as HUF_buildCTable(), but using externally allocated scratch buffer.
* `workSpace` must be aligned on 4-bytes boundaries, and its size must be >= HUF_CTABLE_WORKSPACE_SIZE.
*/
-#define HUF_CTABLE_WORKSPACE_SIZE_U32 (2*HUF_SYMBOLVALUE_MAX +1 +1)
+#define HUF_CTABLE_WORKSPACE_SIZE_U32 ((4 * (HUF_SYMBOLVALUE_MAX + 1)) + 192)
#define HUF_CTABLE_WORKSPACE_SIZE (HUF_CTABLE_WORKSPACE_SIZE_U32 * sizeof(unsigned))
size_t HUF_buildCTable_wksp (HUF_CElt* tree,
const unsigned* count, U32 maxSymbolValue, U32 maxNbBits,
@@ -238,7 +185,7 @@ size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize,
U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr,
const void* src, size_t srcSize,
void* workspace, size_t wkspSize,
- int bmi2);
+ int flags);
/* HUF_readCTable() :
* Loading a CTable saved with HUF_writeCTable() */
@@ -246,9 +193,22 @@ size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void
/* HUF_getNbBitsFromCTable() :
* Read nbBits from CTable symbolTable, for symbol `symbolValue` presumed <= HUF_SYMBOLVALUE_MAX
- * Note 1 : is not inlined, as HUF_CElt definition is private */
+ * Note 1 : If symbolValue > HUF_readCTableHeader(symbolTable).maxSymbolValue, returns 0
+ * Note 2 : is not inlined, as HUF_CElt definition is private
+ */
U32 HUF_getNbBitsFromCTable(const HUF_CElt* symbolTable, U32 symbolValue);
+typedef struct {
+ BYTE tableLog;
+ BYTE maxSymbolValue;
+ BYTE unused[sizeof(size_t) - 2];
+} HUF_CTableHeader;
+
+/* HUF_readCTableHeader() :
+ * @returns The header from the CTable specifying the tableLog and the maxSymbolValue.
+ */
+HUF_CTableHeader HUF_readCTableHeader(HUF_CElt const* ctable);
+
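
Since the CTable now carries its own tableLog and maxSymbolValue in element 0, consumers can recover both without extra parameters. A minimal sketch using only the declarations above:

    static unsigned huf_ctable_log(const HUF_CElt *ctable)
    {
        HUF_CTableHeader const h = HUF_readCTableHeader(ctable);
        return (unsigned)h.tableLog;   /* h.maxSymbolValue is read the same way */
    }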
/*
* HUF_decompress() does the following:
* 1. select the decompression algorithm (X1, X2) based on pre-computed heuristics
@@ -276,32 +236,12 @@ U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize);
#define HUF_DECOMPRESS_WORKSPACE_SIZE ((2 << 10) + (1 << 9))
#define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32))
-#ifndef HUF_FORCE_DECOMPRESS_X2
-size_t HUF_readDTableX1 (HUF_DTable* DTable, const void* src, size_t srcSize);
-size_t HUF_readDTableX1_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
-#endif
-#ifndef HUF_FORCE_DECOMPRESS_X1
-size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize);
-size_t HUF_readDTableX2_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
-#endif
-
-size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
-#ifndef HUF_FORCE_DECOMPRESS_X2
-size_t HUF_decompress4X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
-#endif
-#ifndef HUF_FORCE_DECOMPRESS_X1
-size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
-#endif
-
/* ====================== */
/* single stream variants */
/* ====================== */
-size_t HUF_compress1X (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
-size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); /*< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U64 U64 */
-size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
-size_t HUF_compress1X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2);
+size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags);
/* HUF_compress1X_repeat() :
* Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
* If it uses hufTable it does not modify hufTable or repeat.
@@ -312,47 +252,27 @@ size_t HUF_compress1X_repeat(void* dst, size_t dstSize,
const void* src, size_t srcSize,
unsigned maxSymbolValue, unsigned tableLog,
void* workSpace, size_t wkspSize, /*< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
- HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2, unsigned suspectUncompressible);
-
-size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* single-symbol decoder */
-#ifndef HUF_FORCE_DECOMPRESS_X1
-size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* double-symbol decoder */
-#endif
-
-size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
-size_t HUF_decompress1X_DCtx_wksp (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);
-#ifndef HUF_FORCE_DECOMPRESS_X2
-size_t HUF_decompress1X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /*< single-symbol decoder */
-size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /*< single-symbol decoder */
-#endif
-#ifndef HUF_FORCE_DECOMPRESS_X1
-size_t HUF_decompress1X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /*< double-symbols decoder */
-size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /*< double-symbols decoder */
-#endif
+ HUF_CElt* hufTable, HUF_repeat* repeat, int flags);
-size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); /*< automatic selection of sing or double symbol decoder, based on DTable */
-#ifndef HUF_FORCE_DECOMPRESS_X2
-size_t HUF_decompress1X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
-#endif
+size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags);
#ifndef HUF_FORCE_DECOMPRESS_X1
-size_t HUF_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
+size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags); /*< double-symbols decoder */
#endif
/* BMI2 variants.
* If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0.
*/
-size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
+size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int flags);
#ifndef HUF_FORCE_DECOMPRESS_X2
-size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);
+size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags);
#endif
-size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
-size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);
+size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int flags);
+size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags);
#ifndef HUF_FORCE_DECOMPRESS_X2
-size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2);
+size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int flags);
#endif
#ifndef HUF_FORCE_DECOMPRESS_X1
-size_t HUF_readDTableX2_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2);
+size_t HUF_readDTableX2_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int flags);
#endif
-#endif /* HUF_STATIC_LINKING_ONLY */
-
+#endif /* HUF_H_298734234 */
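
With the static-linking split gone and the *_bmi2 variants folded into the plain names, the decompression entry points now take an int flags built from HUF_flags_e. A hedged usage sketch; the wrapper is hypothetical, and the DTable is assumed to have been declared with the HUF_CREATE_STATIC_DTABLEX* macros from the unchanged part of huf.h:

    #include "huf.h"

    /* workSpace must provide at least HUF_DECOMPRESS_WORKSPACE_SIZE bytes. */
    static size_t huf_decode_block(HUF_DTable *dtable,
                                   void *dst, size_t dstSize,
                                   const void *cSrc, size_t cSrcSize,
                                   void *workSpace, size_t wkspSize, int flags)
    {
        return HUF_decompress4X_hufOnly_wksp(dtable, dst, dstSize, cSrc, cSrcSize,
                                             workSpace, wkspSize, flags);
    }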
diff --git a/lib/zstd/common/mem.h b/lib/zstd/common/mem.h
index c22a2e69bf46..d9bd752fe17b 100644
--- a/lib/zstd/common/mem.h
+++ b/lib/zstd/common/mem.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -24,6 +24,7 @@
/*-****************************************
* Compiler specifics
******************************************/
+#undef MEM_STATIC /* may be already defined from common/compiler.h */
#define MEM_STATIC static inline
/*-**************************************************************
diff --git a/lib/zstd/common/portability_macros.h b/lib/zstd/common/portability_macros.h
index 0e3b2c0a527d..e1890b32da88 100644
--- a/lib/zstd/common/portability_macros.h
+++ b/lib/zstd/common/portability_macros.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -12,7 +13,7 @@
#define ZSTD_PORTABILITY_MACROS_H
/*
- * This header file contains macro defintions to support portability.
+ * This header file contains macro definitions to support portability.
* This header is shared between C and ASM code, so it MUST only
* contain macro definitions. It MUST not contain any C code.
*
@@ -45,30 +46,35 @@
/* Mark the internal assembly functions as hidden */
#ifdef __ELF__
# define ZSTD_HIDE_ASM_FUNCTION(func) .hidden func
+#elif defined(__APPLE__)
+# define ZSTD_HIDE_ASM_FUNCTION(func) .private_extern func
#else
# define ZSTD_HIDE_ASM_FUNCTION(func)
#endif
+/* Compile time determination of BMI2 support */
+
+
/* Enable runtime BMI2 dispatch based on the CPU.
* Enabled for clang & gcc >=4.8 on x86 when BMI2 isn't enabled by default.
*/
#ifndef DYNAMIC_BMI2
- #if ((defined(__clang__) && __has_attribute(__target__)) \
+# if ((defined(__clang__) && __has_attribute(__target__)) \
|| (defined(__GNUC__) \
- && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)))) \
- && (defined(__x86_64__) || defined(_M_X64)) \
+ && (__GNUC__ >= 11))) \
+ && (defined(__i386__) || defined(__x86_64__) || defined(_M_IX86) || defined(_M_X64)) \
&& !defined(__BMI2__)
- # define DYNAMIC_BMI2 1
- #else
- # define DYNAMIC_BMI2 0
- #endif
+# define DYNAMIC_BMI2 1
+# else
+# define DYNAMIC_BMI2 0
+# endif
#endif
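
When DYNAMIC_BMI2 is 1, zstd typically compiles BMI2-targeted copies of hot functions and selects one at runtime from the CPUID result. A rough illustration of that pattern; the function names and bodies below are hypothetical, not part of the patch:

    #include <stddef.h>

    #if DYNAMIC_BMI2
    __attribute__((__target__("bmi2")))
    static size_t sum_bytes_bmi2(const unsigned char *src, size_t n)
    {
        size_t acc = 0, i;
        for (i = 0; i < n; i++) acc += src[i];   /* compiler may use BMI2 here */
        return acc;
    }
    #endif

    static size_t sum_bytes_default(const unsigned char *src, size_t n)
    {
        size_t acc = 0, i;
        for (i = 0; i < n; i++) acc += src[i];
        return acc;
    }

    static size_t sum_bytes(const unsigned char *src, size_t n, int cpu_has_bmi2)
    {
    #if DYNAMIC_BMI2
        if (cpu_has_bmi2) return sum_bytes_bmi2(src, n);
    #endif
        (void)cpu_has_bmi2;
        return sum_bytes_default(src, n);
    }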
/*
- * Only enable assembly for GNUC comptabile compilers,
+ * Only enable assembly for GNU C compatible compilers,
* because other platforms may not support GAS assembly syntax.
*
- * Only enable assembly for Linux / MacOS, other platforms may
+ * Only enable assembly for Linux / MacOS / Win32, other platforms may
* work, but they haven't been tested. This could likely be
* extended to BSD systems.
*
@@ -90,4 +96,23 @@
*/
#define ZSTD_ENABLE_ASM_X86_64_BMI2 0
+/*
+ * For x86 ELF targets, add .note.gnu.property section for Intel CET in
+ * assembly sources when CET is enabled.
+ *
+ * Additionally, any function that may be called indirectly must begin
+ * with ZSTD_CET_ENDBRANCH.
+ */
+#if defined(__ELF__) && (defined(__x86_64__) || defined(__i386__)) \
+ && defined(__has_include)
+# if __has_include(<cet.h>)
+# include <cet.h>
+# define ZSTD_CET_ENDBRANCH _CET_ENDBR
+# endif
+#endif
+
+#ifndef ZSTD_CET_ENDBRANCH
+# define ZSTD_CET_ENDBRANCH
+#endif
+
#endif /* ZSTD_PORTABILITY_MACROS_H */
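
The new block uses the usual detect-or-no-op pattern: when the build enables -fcf-protection the compiler defines __CET__ and <cet.h> makes _CET_ENDBR expand to an ENDBR marker; otherwise ZSTD_CET_ENDBRANCH expands to nothing, so the assembly sources can use it unconditionally. A standalone sketch of the same pattern with a hypothetical macro name:

    #if defined(__ELF__) && (defined(__x86_64__) || defined(__i386__)) \
        && defined(__has_include)
    # if __has_include(<cet.h>)
    #  include <cet.h>
    #  define MY_INDIRECT_ENTRY_MARK _CET_ENDBR
    # endif
    #endif

    #ifndef MY_INDIRECT_ENTRY_MARK
    # define MY_INDIRECT_ENTRY_MARK   /* no CET support: expands to nothing */
    #endif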
diff --git a/lib/zstd/common/zstd_common.c b/lib/zstd/common/zstd_common.c
index 3d7e35b309b5..44b95b25344a 100644
--- a/lib/zstd/common/zstd_common.c
+++ b/lib/zstd/common/zstd_common.c
@@ -1,5 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -14,7 +15,6 @@
* Dependencies
***************************************/
#define ZSTD_DEPS_NEED_MALLOC
-#include "zstd_deps.h" /* ZSTD_malloc, ZSTD_calloc, ZSTD_free, ZSTD_memset */
#include "error_private.h"
#include "zstd_internal.h"
@@ -47,37 +47,3 @@ ZSTD_ErrorCode ZSTD_getErrorCode(size_t code) { return ERR_getErrorCode(code); }
/*! ZSTD_getErrorString() :
* provides error code string from enum */
const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorString(code); }
-
-
-
-/*=**************************************************************
-* Custom allocator
-****************************************************************/
-void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem)
-{
- if (customMem.customAlloc)
- return customMem.customAlloc(customMem.opaque, size);
- return ZSTD_malloc(size);
-}
-
-void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem)
-{
- if (customMem.customAlloc) {
- /* calloc implemented as malloc+memset;
- * not as efficient as calloc, but next best guess for custom malloc */
- void* const ptr = customMem.customAlloc(customMem.opaque, size);
- ZSTD_memset(ptr, 0, size);
- return ptr;
- }
- return ZSTD_calloc(1, size);
-}
-
-void ZSTD_customFree(void* ptr, ZSTD_customMem customMem)
-{
- if (ptr!=NULL) {
- if (customMem.customFree)
- customMem.customFree(customMem.opaque, ptr);
- else
- ZSTD_free(ptr);
- }
-}
diff --git a/lib/zstd/common/zstd_deps.h b/lib/zstd/common/zstd_deps.h
index 2c34e8a33a1c..f931f7d0e294 100644
--- a/lib/zstd/common/zstd_deps.h
+++ b/lib/zstd/common/zstd_deps.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -105,3 +105,17 @@ static uint64_t ZSTD_div64(uint64_t dividend, uint32_t divisor) {
#endif /* ZSTD_DEPS_IO */
#endif /* ZSTD_DEPS_NEED_IO */
+
+/*
+ * Only requested when MSAN is enabled.
+ * Need:
+ * intptr_t
+ */
+#ifdef ZSTD_DEPS_NEED_STDINT
+#ifndef ZSTD_DEPS_STDINT
+#define ZSTD_DEPS_STDINT
+
+/* intptr_t already provided by ZSTD_DEPS_COMMON */
+
+#endif /* ZSTD_DEPS_STDINT */
+#endif /* ZSTD_DEPS_NEED_STDINT */
diff --git a/lib/zstd/common/zstd_internal.h b/lib/zstd/common/zstd_internal.h
index 93305d9b41bb..52a79435caf6 100644
--- a/lib/zstd/common/zstd_internal.h
+++ b/lib/zstd/common/zstd_internal.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -28,12 +29,10 @@
#include <linux/zstd.h>
#define FSE_STATIC_LINKING_ONLY
#include "fse.h"
-#define HUF_STATIC_LINKING_ONLY
#include "huf.h"
#include <linux/xxhash.h> /* XXH_reset, update, digest */
#define ZSTD_TRACE 0
-
/* ---- static assert (debug) --- */
#define ZSTD_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c)
#define ZSTD_isError ERR_isError /* for inlining */
@@ -83,16 +82,17 @@ typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e;
#define ZSTD_FRAMECHECKSUMSIZE 4
#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */
-#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */) /* for a non-null block */
+#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */) /* for a non-null block */
+#define MIN_LITERALS_FOR_4_STREAMS 6
-#define HufLog 12
-typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e;
+typedef enum { set_basic, set_rle, set_compressed, set_repeat } SymbolEncodingType_e;
#define LONGNBSEQ 0x7F00
#define MINMATCH 3
#define Litbits 8
+#define LitHufLog 11
#define MaxLit ((1<<Litbits) - 1)
#define MaxML 52
#define MaxLL 35
@@ -103,6 +103,8 @@ typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingTy
#define LLFSELog 9
#define OffFSELog 8
#define MaxFSELog MAX(MAX(MLFSELog, LLFSELog), OffFSELog)
+#define MaxMLBits 16
+#define MaxLLBits 16
#define ZSTD_MAX_HUF_HEADER_SIZE 128 /* header + <= 127 byte tree description */
/* Each table cannot take more than #symbols * FSELog bits */
@@ -166,7 +168,7 @@ static void ZSTD_copy8(void* dst, const void* src) {
ZSTD_memcpy(dst, src, 8);
#endif
}
-#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }
+#define COPY8(d,s) do { ZSTD_copy8(d,s); d+=8; s+=8; } while (0)
/* Need to use memmove here since the literal buffer can now be located within
the dst buffer. In circumstances where the op "catches up" to where the
@@ -186,7 +188,7 @@ static void ZSTD_copy16(void* dst, const void* src) {
ZSTD_memcpy(dst, copy16_buf, 16);
#endif
}
-#define COPY16(d,s) { ZSTD_copy16(d,s); d+=16; s+=16; }
+#define COPY16(d,s) do { ZSTD_copy16(d,s); d+=16; s+=16; } while (0)
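
Wrapping COPY8/COPY16 in do { ... } while (0) makes each macro behave as a single statement, which is also why the call site further down gains a trailing semicolon. A short illustration of the hazard the idiom avoids (macro names hypothetical):

    #define BAD_COPY8(d,s)  { ZSTD_copy8(d,s); d+=8; s+=8; }               /* old style */
    #define GOOD_COPY8(d,s) do { ZSTD_copy8(d,s); d+=8; s+=8; } while (0)  /* new style */

    /* if (cond) BAD_COPY8(op, ip);  else fallback();  -> does not compile:
     *     the ';' after '}' ends the if, leaving a dangling 'else'.
     * if (cond) GOOD_COPY8(op, ip); else fallback();  -> parses as intended. */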
#define WILDCOPY_OVERLENGTH 32
#define WILDCOPY_VECLEN 16
@@ -215,7 +217,7 @@ void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e
if (ovtype == ZSTD_overlap_src_before_dst && diff < WILDCOPY_VECLEN) {
/* Handle short offset copies. */
do {
- COPY8(op, ip)
+ COPY8(op, ip);
} while (op < oend);
} else {
assert(diff >= WILDCOPY_VECLEN || diff <= -WILDCOPY_VECLEN);
@@ -225,12 +227,6 @@ void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e
* one COPY16() in the first call. Then, do two calls per loop since
* at that point it is more likely to have a high trip count.
*/
-#ifdef __aarch64__
- do {
- COPY16(op, ip);
- }
- while (op < oend);
-#else
ZSTD_copy16(op, ip);
if (16 >= length) return;
op += 16;
@@ -240,7 +236,6 @@ void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e
COPY16(op, ip);
}
while (op < oend);
-#endif
}
}
@@ -273,62 +268,6 @@ typedef enum {
/*-*******************************************
* Private declarations
*********************************************/
-typedef struct seqDef_s {
- U32 offBase; /* offBase == Offset + ZSTD_REP_NUM, or repcode 1,2,3 */
- U16 litLength;
- U16 mlBase; /* mlBase == matchLength - MINMATCH */
-} seqDef;
-
-/* Controls whether seqStore has a single "long" litLength or matchLength. See seqStore_t. */
-typedef enum {
- ZSTD_llt_none = 0, /* no longLengthType */
- ZSTD_llt_literalLength = 1, /* represents a long literal */
- ZSTD_llt_matchLength = 2 /* represents a long match */
-} ZSTD_longLengthType_e;
-
-typedef struct {
- seqDef* sequencesStart;
- seqDef* sequences; /* ptr to end of sequences */
- BYTE* litStart;
- BYTE* lit; /* ptr to end of literals */
- BYTE* llCode;
- BYTE* mlCode;
- BYTE* ofCode;
- size_t maxNbSeq;
- size_t maxNbLit;
-
- /* longLengthPos and longLengthType to allow us to represent either a single litLength or matchLength
- * in the seqStore that has a value larger than U16 (if it exists). To do so, we increment
- * the existing value of the litLength or matchLength by 0x10000.
- */
- ZSTD_longLengthType_e longLengthType;
- U32 longLengthPos; /* Index of the sequence to apply long length modification to */
-} seqStore_t;
-
-typedef struct {
- U32 litLength;
- U32 matchLength;
-} ZSTD_sequenceLength;
-
-/*
- * Returns the ZSTD_sequenceLength for the given sequences. It handles the decoding of long sequences
- * indicated by longLengthPos and longLengthType, and adds MINMATCH back to matchLength.
- */
-MEM_STATIC ZSTD_sequenceLength ZSTD_getSequenceLength(seqStore_t const* seqStore, seqDef const* seq)
-{
- ZSTD_sequenceLength seqLen;
- seqLen.litLength = seq->litLength;
- seqLen.matchLength = seq->mlBase + MINMATCH;
- if (seqStore->longLengthPos == (U32)(seq - seqStore->sequencesStart)) {
- if (seqStore->longLengthType == ZSTD_llt_literalLength) {
- seqLen.litLength += 0xFFFF;
- }
- if (seqStore->longLengthType == ZSTD_llt_matchLength) {
- seqLen.matchLength += 0xFFFF;
- }
- }
- return seqLen;
-}
/*
* Contains the compressed frame size and an upper-bound for the decompressed frame size.
@@ -337,74 +276,11 @@ MEM_STATIC ZSTD_sequenceLength ZSTD_getSequenceLength(seqStore_t const* seqStore
* `decompressedBound != ZSTD_CONTENTSIZE_ERROR`
*/
typedef struct {
+ size_t nbBlocks;
size_t compressedSize;
unsigned long long decompressedBound;
} ZSTD_frameSizeInfo; /* decompress & legacy */
-const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx); /* compress & dictBuilder */
-void ZSTD_seqToCodes(const seqStore_t* seqStorePtr); /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */
-
-/* custom memory allocation functions */
-void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem);
-void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem);
-void ZSTD_customFree(void* ptr, ZSTD_customMem customMem);
-
-
-MEM_STATIC U32 ZSTD_highbit32(U32 val) /* compress, dictBuilder, decodeCorpus */
-{
- assert(val != 0);
- {
-# if (__GNUC__ >= 3) /* GCC Intrinsic */
- return __builtin_clz (val) ^ 31;
-# else /* Software version */
- static const U32 DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
- U32 v = val;
- v |= v >> 1;
- v |= v >> 2;
- v |= v >> 4;
- v |= v >> 8;
- v |= v >> 16;
- return DeBruijnClz[(v * 0x07C4ACDDU) >> 27];
-# endif
- }
-}
-
-/*
- * Counts the number of trailing zeros of a `size_t`.
- * Most compilers should support CTZ as a builtin. A backup
- * implementation is provided if the builtin isn't supported, but
- * it may not be terribly efficient.
- */
-MEM_STATIC unsigned ZSTD_countTrailingZeros(size_t val)
-{
- if (MEM_64bits()) {
-# if (__GNUC__ >= 4)
- return __builtin_ctzll((U64)val);
-# else
- static const int DeBruijnBytePos[64] = { 0, 1, 2, 7, 3, 13, 8, 19,
- 4, 25, 14, 28, 9, 34, 20, 56,
- 5, 17, 26, 54, 15, 41, 29, 43,
- 10, 31, 38, 35, 21, 45, 49, 57,
- 63, 6, 12, 18, 24, 27, 33, 55,
- 16, 53, 40, 42, 30, 37, 44, 48,
- 62, 11, 23, 32, 52, 39, 36, 47,
- 61, 22, 51, 46, 60, 50, 59, 58 };
- return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
-# endif
- } else { /* 32 bits */
-# if (__GNUC__ >= 3)
- return __builtin_ctz((U32)val);
-# else
- static const int DeBruijnBytePos[32] = { 0, 1, 28, 2, 29, 14, 24, 3,
- 30, 22, 20, 15, 25, 17, 4, 8,
- 31, 27, 13, 23, 21, 19, 16, 7,
- 26, 12, 18, 6, 11, 5, 10, 9 };
- return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
-# endif
- }
-}
-
-
/* ZSTD_invalidateRepCodes() :
* ensures next compression will not use repcodes from previous block.
* Note : only works with regular variant;
@@ -420,13 +296,13 @@ typedef struct {
/*! ZSTD_getcBlockSize() :
* Provides the size of compressed block from block header `src` */
-/* Used by: decompress, fullbench (does not get its definition from here) */
+/* Used by: decompress, fullbench */
size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
blockProperties_t* bpPtr);
/*! ZSTD_decodeSeqHeaders() :
* decode sequence header from src */
-/* Used by: decompress, fullbench (does not get its definition from here) */
+/* Used by: zstd_decompress_block, fullbench */
size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
const void* src, size_t srcSize);
@@ -439,5 +315,4 @@ MEM_STATIC int ZSTD_cpuSupportsBmi2(void)
return ZSTD_cpuid_bmi1(cpuid) && ZSTD_cpuid_bmi2(cpuid);
}
-
#endif /* ZSTD_CCOMMON_H_MODULE */
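
ZSTD_highbit32() and ZSTD_countTrailingZeros() leave zstd_internal.h; the compression files further down include common/bits.h and call ZSTD_highbit32() where BIT_highbit32() used to be. A minimal sketch of the contract the callers rely on, mirroring the removed GCC-builtin path:

    #include <assert.h>

    /* Position of the highest set bit; val must be non-zero. */
    static unsigned highbit32_sketch(unsigned val)
    {
        assert(val != 0);
        return (unsigned)(__builtin_clz(val) ^ 31);
    }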
diff --git a/lib/zstd/compress/clevels.h b/lib/zstd/compress/clevels.h
index d9a76112ec3a..6ab8be6532ef 100644
--- a/lib/zstd/compress/clevels.h
+++ b/lib/zstd/compress/clevels.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
diff --git a/lib/zstd/compress/fse_compress.c b/lib/zstd/compress/fse_compress.c
index ec5b1ca6d71a..44a3c10becf2 100644
--- a/lib/zstd/compress/fse_compress.c
+++ b/lib/zstd/compress/fse_compress.c
@@ -1,6 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/* ******************************************************************
* FSE : Finite State Entropy encoder
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
*
* You can contact the author at :
* - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
@@ -25,7 +26,8 @@
#include "../common/error_private.h"
#define ZSTD_DEPS_NEED_MALLOC
#define ZSTD_DEPS_NEED_MATH64
-#include "../common/zstd_deps.h" /* ZSTD_malloc, ZSTD_free, ZSTD_memcpy, ZSTD_memset */
+#include "../common/zstd_deps.h" /* ZSTD_memset */
+#include "../common/bits.h" /* ZSTD_highbit32 */
/* **************************************************************
@@ -90,7 +92,7 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct,
assert(tableLog < 16); /* required for threshold strategy to work */
/* For explanations on how to distribute symbol values over the table :
- * http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */
+ * https://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */
#ifdef __clang_analyzer__
ZSTD_memset(tableSymbol, 0, sizeof(*tableSymbol) * tableSize); /* useless initialization, just to keep scan-build happy */
@@ -191,7 +193,7 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct,
break;
default :
assert(normalizedCounter[s] > 1);
- { U32 const maxBitsOut = tableLog - BIT_highbit32 ((U32)normalizedCounter[s]-1);
+ { U32 const maxBitsOut = tableLog - ZSTD_highbit32 ((U32)normalizedCounter[s]-1);
U32 const minStatePlus = (U32)normalizedCounter[s] << maxBitsOut;
symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus;
symbolTT[s].deltaFindState = (int)(total - (unsigned)normalizedCounter[s]);
@@ -224,8 +226,8 @@ size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog)
size_t const maxHeaderSize = (((maxSymbolValue+1) * tableLog
+ 4 /* bitCount initialized at 4 */
+ 2 /* first two symbols may use one additional bit each */) / 8)
- + 1 /* round up to whole nb bytes */
- + 2 /* additional two bytes for bitstream flush */;
+ + 1 /* round up to whole nb bytes */
+ + 2 /* additional two bytes for bitstream flush */;
return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND; /* maxSymbolValue==0 ? use default */
}
@@ -254,7 +256,7 @@ FSE_writeNCount_generic (void* header, size_t headerBufferSize,
/* Init */
remaining = tableSize+1; /* +1 for extra accuracy */
threshold = tableSize;
- nbBits = tableLog+1;
+ nbBits = (int)tableLog+1;
while ((symbol < alphabetSize) && (remaining>1)) { /* stops at 1 */
if (previousIs0) {
@@ -273,7 +275,7 @@ FSE_writeNCount_generic (void* header, size_t headerBufferSize,
}
while (symbol >= start+3) {
start+=3;
- bitStream += 3 << bitCount;
+ bitStream += 3U << bitCount;
bitCount += 2;
}
bitStream += (symbol-start) << bitCount;
@@ -293,7 +295,7 @@ FSE_writeNCount_generic (void* header, size_t headerBufferSize,
count++; /* +1 for extra accuracy */
if (count>=threshold)
count += max; /* [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ */
- bitStream += count << bitCount;
+ bitStream += (U32)count << bitCount;
bitCount += nbBits;
bitCount -= (count<max);
previousIs0 = (count==1);
@@ -321,7 +323,8 @@ FSE_writeNCount_generic (void* header, size_t headerBufferSize,
out[1] = (BYTE)(bitStream>>8);
out+= (bitCount+7) /8;
- return (out-ostart);
+ assert(out >= ostart);
+ return (size_t)(out-ostart);
}
@@ -342,21 +345,11 @@ size_t FSE_writeNCount (void* buffer, size_t bufferSize,
* FSE Compression Code
****************************************************************/
-FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog)
-{
- size_t size;
- if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX;
- size = FSE_CTABLE_SIZE_U32 (tableLog, maxSymbolValue) * sizeof(U32);
- return (FSE_CTable*)ZSTD_malloc(size);
-}
-
-void FSE_freeCTable (FSE_CTable* ct) { ZSTD_free(ct); }
-
/* provides the minimum logSize to safely represent a distribution */
static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue)
{
- U32 minBitsSrc = BIT_highbit32((U32)(srcSize)) + 1;
- U32 minBitsSymbols = BIT_highbit32(maxSymbolValue) + 2;
+ U32 minBitsSrc = ZSTD_highbit32((U32)(srcSize)) + 1;
+ U32 minBitsSymbols = ZSTD_highbit32(maxSymbolValue) + 2;
U32 minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols;
assert(srcSize > 1); /* Not supported, RLE should be used instead */
return minBits;
@@ -364,7 +357,7 @@ static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue)
unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus)
{
- U32 maxBitsSrc = BIT_highbit32((U32)(srcSize - 1)) - minus;
+ U32 maxBitsSrc = ZSTD_highbit32((U32)(srcSize - 1)) - minus;
U32 tableLog = maxTableLog;
U32 minBits = FSE_minTableLog(srcSize, maxSymbolValue);
assert(srcSize > 1); /* Not supported, RLE should be used instead */
@@ -532,40 +525,6 @@ size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog,
return tableLog;
}
-
-/* fake FSE_CTable, for raw (uncompressed) input */
-size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits)
-{
- const unsigned tableSize = 1 << nbBits;
- const unsigned tableMask = tableSize - 1;
- const unsigned maxSymbolValue = tableMask;
- void* const ptr = ct;
- U16* const tableU16 = ( (U16*) ptr) + 2;
- void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableSize>>1); /* assumption : tableLog >= 1 */
- FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
- unsigned s;
-
- /* Sanity checks */
- if (nbBits < 1) return ERROR(GENERIC); /* min size */
-
- /* header */
- tableU16[-2] = (U16) nbBits;
- tableU16[-1] = (U16) maxSymbolValue;
-
- /* Build table */
- for (s=0; s<tableSize; s++)
- tableU16[s] = (U16)(tableSize + s);
-
- /* Build Symbol Transformation Table */
- { const U32 deltaNbBits = (nbBits << 16) - (1 << nbBits);
- for (s=0; s<=maxSymbolValue; s++) {
- symbolTT[s].deltaNbBits = deltaNbBits;
- symbolTT[s].deltaFindState = s-1;
- } }
-
- return 0;
-}
-
/* fake FSE_CTable, for rle input (always same symbol) */
size_t FSE_buildCTable_rle (FSE_CTable* ct, BYTE symbolValue)
{
@@ -664,5 +623,4 @@ size_t FSE_compress_usingCTable (void* dst, size_t dstSize,
size_t FSE_compressBound(size_t size) { return FSE_COMPRESSBOUND(size); }
-
#endif /* FSE_COMMONDEFS_ONLY */
diff --git a/lib/zstd/compress/hist.c b/lib/zstd/compress/hist.c
index 3ddc6dfb6894..87145a2d9160 100644
--- a/lib/zstd/compress/hist.c
+++ b/lib/zstd/compress/hist.c
@@ -1,7 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/* ******************************************************************
* hist : Histogram functions
* part of Finite State Entropy project
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
*
* You can contact the author at :
* - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
@@ -26,6 +27,16 @@ unsigned HIST_isError(size_t code) { return ERR_isError(code); }
/*-**************************************************************
* Histogram functions
****************************************************************/
+void HIST_add(unsigned* count, const void* src, size_t srcSize)
+{
+ const BYTE* ip = (const BYTE*)src;
+ const BYTE* const end = ip + srcSize;
+
+ while (ip<end) {
+ count[*ip++]++;
+ }
+}
+
unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
const void* src, size_t srcSize)
{
diff --git a/lib/zstd/compress/hist.h b/lib/zstd/compress/hist.h
index fc1830abc9c6..e5d57d79e4d5 100644
--- a/lib/zstd/compress/hist.h
+++ b/lib/zstd/compress/hist.h
@@ -1,7 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/* ******************************************************************
* hist : Histogram functions
* part of Finite State Entropy project
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
*
* You can contact the author at :
* - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
@@ -73,3 +74,10 @@ size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
*/
unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
const void* src, size_t srcSize);
+
+/*! HIST_add() :
+ * Lowest level: just add nb of occurrences of characters from @src into @count.
+ * @count is not reset. @count array is presumed large enough (i.e. 1 KB).
+ * This function does not need any additional stack memory.
+ */
+void HIST_add(unsigned* count, const void* src, size_t srcSize);
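
HIST_add() only increments, so a caller can stream several buffers into one histogram as long as it zeroes the table first. A short usage sketch; the wrapper and buffer names are illustrative:

    #include <string.h>
    #include "hist.h"

    static void histogram_two_chunks(unsigned count[256],
                                     const void *a, size_t aSize,
                                     const void *b, size_t bSize)
    {
        memset(count, 0, 256 * sizeof(count[0]));   /* HIST_add() never resets */
        HIST_add(count, a, aSize);
        HIST_add(count, b, bSize);
    }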
diff --git a/lib/zstd/compress/huf_compress.c b/lib/zstd/compress/huf_compress.c
index 74ef0db47621..0b229f5d2ae2 100644
--- a/lib/zstd/compress/huf_compress.c
+++ b/lib/zstd/compress/huf_compress.c
@@ -1,6 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/* ******************************************************************
* Huffman encoder, part of New Generation Entropy library
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
*
* You can contact the author at :
* - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
@@ -26,9 +27,9 @@
#include "hist.h"
#define FSE_STATIC_LINKING_ONLY /* FSE_optimalTableLog_internal */
#include "../common/fse.h" /* header compression */
-#define HUF_STATIC_LINKING_ONLY
#include "../common/huf.h"
#include "../common/error_private.h"
+#include "../common/bits.h" /* ZSTD_highbit32 */
/* **************************************************************
@@ -39,13 +40,67 @@
/* **************************************************************
-* Utils
+* Required declarations
****************************************************************/
-unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
+typedef struct nodeElt_s {
+ U32 count;
+ U16 parent;
+ BYTE byte;
+ BYTE nbBits;
+} nodeElt;
+
+
+/* **************************************************************
+* Debug Traces
+****************************************************************/
+
+#if DEBUGLEVEL >= 2
+
+static size_t showU32(const U32* arr, size_t size)
{
- return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1);
+ size_t u;
+ for (u=0; u<size; u++) {
+ RAWLOG(6, " %u", arr[u]); (void)arr;
+ }
+ RAWLOG(6, " \n");
+ return size;
}
+static size_t HUF_getNbBits(HUF_CElt elt);
+
+static size_t showCTableBits(const HUF_CElt* ctable, size_t size)
+{
+ size_t u;
+ for (u=0; u<size; u++) {
+ RAWLOG(6, " %zu", HUF_getNbBits(ctable[u])); (void)ctable;
+ }
+ RAWLOG(6, " \n");
+ return size;
+
+}
+
+static size_t showHNodeSymbols(const nodeElt* hnode, size_t size)
+{
+ size_t u;
+ for (u=0; u<size; u++) {
+ RAWLOG(6, " %u", hnode[u].byte); (void)hnode;
+ }
+ RAWLOG(6, " \n");
+ return size;
+}
+
+static size_t showHNodeBits(const nodeElt* hnode, size_t size)
+{
+ size_t u;
+ for (u=0; u<size; u++) {
+ RAWLOG(6, " %u", hnode[u].nbBits); (void)hnode;
+ }
+ RAWLOG(6, " \n");
+ return size;
+}
+
+#endif
+
/* *******************************************************
* HUF : Huffman block compression
@@ -86,7 +141,10 @@ typedef struct {
S16 norm[HUF_TABLELOG_MAX+1];
} HUF_CompressWeightsWksp;
-static size_t HUF_compressWeights(void* dst, size_t dstSize, const void* weightTable, size_t wtSize, void* workspace, size_t workspaceSize)
+static size_t
+HUF_compressWeights(void* dst, size_t dstSize,
+ const void* weightTable, size_t wtSize,
+ void* workspace, size_t workspaceSize)
{
BYTE* const ostart = (BYTE*) dst;
BYTE* op = ostart;
@@ -137,7 +195,7 @@ static size_t HUF_getNbBitsFast(HUF_CElt elt)
static size_t HUF_getValue(HUF_CElt elt)
{
- return elt & ~0xFF;
+ return elt & ~(size_t)0xFF;
}
static size_t HUF_getValueFast(HUF_CElt elt)
@@ -160,6 +218,25 @@ static void HUF_setValue(HUF_CElt* elt, size_t value)
}
}
+HUF_CTableHeader HUF_readCTableHeader(HUF_CElt const* ctable)
+{
+ HUF_CTableHeader header;
+ ZSTD_memcpy(&header, ctable, sizeof(header));
+ return header;
+}
+
+static void HUF_writeCTableHeader(HUF_CElt* ctable, U32 tableLog, U32 maxSymbolValue)
+{
+ HUF_CTableHeader header;
+ HUF_STATIC_ASSERT(sizeof(ctable[0]) == sizeof(header));
+ ZSTD_memset(&header, 0, sizeof(header));
+ assert(tableLog < 256);
+ header.tableLog = (BYTE)tableLog;
+ assert(maxSymbolValue < 256);
+ header.maxSymbolValue = (BYTE)maxSymbolValue;
+ ZSTD_memcpy(ctable, &header, sizeof(header));
+}
+
typedef struct {
HUF_CompressWeightsWksp wksp;
BYTE bitsToWeight[HUF_TABLELOG_MAX + 1]; /* precomputed conversion table */
@@ -175,6 +252,11 @@ size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize,
U32 n;
HUF_WriteCTableWksp* wksp = (HUF_WriteCTableWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, ZSTD_ALIGNOF(U32));
+ HUF_STATIC_ASSERT(HUF_CTABLE_WORKSPACE_SIZE >= sizeof(HUF_WriteCTableWksp));
+
+ assert(HUF_readCTableHeader(CTable).maxSymbolValue == maxSymbolValue);
+ assert(HUF_readCTableHeader(CTable).tableLog == huffLog);
+
/* check conditions */
if (workspaceSize < sizeof(HUF_WriteCTableWksp)) return ERROR(GENERIC);
if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
@@ -204,16 +286,6 @@ size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize,
return ((maxSymbolValue+1)/2) + 1;
}
-/*! HUF_writeCTable() :
- `CTable` : Huffman tree to save, using huf representation.
- @return : size of saved CTable */
-size_t HUF_writeCTable (void* dst, size_t maxDstSize,
- const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog)
-{
- HUF_WriteCTableWksp wksp;
- return HUF_writeCTable_wksp(dst, maxDstSize, CTable, maxSymbolValue, huffLog, &wksp, sizeof(wksp));
-}
-
size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned* hasZeroWeights)
{
@@ -231,7 +303,9 @@ size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void
if (tableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
if (nbSymbols > *maxSymbolValuePtr+1) return ERROR(maxSymbolValue_tooSmall);
- CTable[0] = tableLog;
+ *maxSymbolValuePtr = nbSymbols - 1;
+
+ HUF_writeCTableHeader(CTable, tableLog, *maxSymbolValuePtr);
/* Prepare base value per rank */
{ U32 n, nextRankStart = 0;
@@ -263,74 +337,71 @@ size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void
{ U32 n; for (n=0; n<nbSymbols; n++) HUF_setValue(ct + n, valPerRank[HUF_getNbBits(ct[n])]++); }
}
- *maxSymbolValuePtr = nbSymbols - 1;
return readSize;
}
U32 HUF_getNbBitsFromCTable(HUF_CElt const* CTable, U32 symbolValue)
{
- const HUF_CElt* ct = CTable + 1;
+ const HUF_CElt* const ct = CTable + 1;
assert(symbolValue <= HUF_SYMBOLVALUE_MAX);
+ if (symbolValue > HUF_readCTableHeader(CTable).maxSymbolValue)
+ return 0;
return (U32)HUF_getNbBits(ct[symbolValue]);
}
-typedef struct nodeElt_s {
- U32 count;
- U16 parent;
- BYTE byte;
- BYTE nbBits;
-} nodeElt;
-
/*
* HUF_setMaxHeight():
- * Enforces maxNbBits on the Huffman tree described in huffNode.
+ * Try to enforce @targetNbBits on the Huffman tree described in @huffNode.
*
- * It sets all nodes with nbBits > maxNbBits to be maxNbBits. Then it adjusts
- * the tree to so that it is a valid canonical Huffman tree.
+ * It attempts to convert all nodes with nbBits > @targetNbBits
+ * to employ @targetNbBits instead. Then it adjusts the tree
+ * so that it remains a valid canonical Huffman tree.
*
* @pre The sum of the ranks of each symbol == 2^largestBits,
* where largestBits == huffNode[lastNonNull].nbBits.
* @post The sum of the ranks of each symbol == 2^largestBits,
- * where largestBits is the return value <= maxNbBits.
+ * where largestBits is the return value (expected <= targetNbBits).
*
- * @param huffNode The Huffman tree modified in place to enforce maxNbBits.
+ * @param huffNode The Huffman tree modified in place to enforce targetNbBits.
+ * It's presumed sorted, from most frequent to rarest symbol.
* @param lastNonNull The symbol with the lowest count in the Huffman tree.
- * @param maxNbBits The maximum allowed number of bits, which the Huffman tree
+ * @param targetNbBits The allowed number of bits, which the Huffman tree
* may not respect. After this function the Huffman tree will
- * respect maxNbBits.
- * @return The maximum number of bits of the Huffman tree after adjustment,
- * necessarily no more than maxNbBits.
+ * respect targetNbBits.
+ * @return The maximum number of bits of the Huffman tree after adjustment.
*/
-static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
+static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 targetNbBits)
{
const U32 largestBits = huffNode[lastNonNull].nbBits;
- /* early exit : no elt > maxNbBits, so the tree is already valid. */
- if (largestBits <= maxNbBits) return largestBits;
+ /* early exit : no elt > targetNbBits, so the tree is already valid. */
+ if (largestBits <= targetNbBits) return largestBits;
+
+ DEBUGLOG(5, "HUF_setMaxHeight (targetNbBits = %u)", targetNbBits);
/* there are several too large elements (at least >= 2) */
{ int totalCost = 0;
- const U32 baseCost = 1 << (largestBits - maxNbBits);
+ const U32 baseCost = 1 << (largestBits - targetNbBits);
int n = (int)lastNonNull;
- /* Adjust any ranks > maxNbBits to maxNbBits.
+ /* Adjust any ranks > targetNbBits to targetNbBits.
 * Compute totalCost, which is how far the sum of the ranks is
 * over 2^largestBits after adjusting the offending ranks.
*/
- while (huffNode[n].nbBits > maxNbBits) {
+ while (huffNode[n].nbBits > targetNbBits) {
totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits));
- huffNode[n].nbBits = (BYTE)maxNbBits;
+ huffNode[n].nbBits = (BYTE)targetNbBits;
n--;
}
- /* n stops at huffNode[n].nbBits <= maxNbBits */
- assert(huffNode[n].nbBits <= maxNbBits);
- /* n end at index of smallest symbol using < maxNbBits */
- while (huffNode[n].nbBits == maxNbBits) --n;
+ /* n stops at huffNode[n].nbBits <= targetNbBits */
+ assert(huffNode[n].nbBits <= targetNbBits);
+ /* n end at index of smallest symbol using < targetNbBits */
+ while (huffNode[n].nbBits == targetNbBits) --n;
- /* renorm totalCost from 2^largestBits to 2^maxNbBits
+ /* renorm totalCost from 2^largestBits to 2^targetNbBits
* note : totalCost is necessarily a multiple of baseCost */
- assert((totalCost & (baseCost - 1)) == 0);
- totalCost >>= (largestBits - maxNbBits);
+ assert(((U32)totalCost & (baseCost - 1)) == 0);
+ totalCost >>= (largestBits - targetNbBits);
assert(totalCost > 0);
/* repay normalized cost */
@@ -339,19 +410,19 @@ static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
/* Get pos of last (smallest = lowest cum. count) symbol per rank */
ZSTD_memset(rankLast, 0xF0, sizeof(rankLast));
- { U32 currentNbBits = maxNbBits;
+ { U32 currentNbBits = targetNbBits;
int pos;
for (pos=n ; pos >= 0; pos--) {
if (huffNode[pos].nbBits >= currentNbBits) continue;
- currentNbBits = huffNode[pos].nbBits; /* < maxNbBits */
- rankLast[maxNbBits-currentNbBits] = (U32)pos;
+ currentNbBits = huffNode[pos].nbBits; /* < targetNbBits */
+ rankLast[targetNbBits-currentNbBits] = (U32)pos;
} }
while (totalCost > 0) {
/* Try to reduce the next power of 2 above totalCost because we
* gain back half the rank.
*/
- U32 nBitsToDecrease = BIT_highbit32((U32)totalCost) + 1;
+ U32 nBitsToDecrease = ZSTD_highbit32((U32)totalCost) + 1;
for ( ; nBitsToDecrease > 1; nBitsToDecrease--) {
U32 const highPos = rankLast[nBitsToDecrease];
U32 const lowPos = rankLast[nBitsToDecrease-1];
@@ -391,7 +462,7 @@ static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
rankLast[nBitsToDecrease] = noSymbol;
else {
rankLast[nBitsToDecrease]--;
- if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease)
+ if (huffNode[rankLast[nBitsToDecrease]].nbBits != targetNbBits-nBitsToDecrease)
rankLast[nBitsToDecrease] = noSymbol; /* this rank is now empty */
}
} /* while (totalCost > 0) */
@@ -403,11 +474,11 @@ static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
* TODO.
*/
while (totalCost < 0) { /* Sometimes, cost correction overshoot */
- /* special case : no rank 1 symbol (using maxNbBits-1);
- * let's create one from largest rank 0 (using maxNbBits).
+ /* special case : no rank 1 symbol (using targetNbBits-1);
+ * let's create one from largest rank 0 (using targetNbBits).
*/
if (rankLast[1] == noSymbol) {
- while (huffNode[n].nbBits == maxNbBits) n--;
+ while (huffNode[n].nbBits == targetNbBits) n--;
huffNode[n+1].nbBits--;
assert(n >= 0);
rankLast[1] = (U32)(n+1);
@@ -421,7 +492,7 @@ static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
} /* repay normalized cost */
} /* there are several too large elements (at least >= 2) */
- return maxNbBits;
+ return targetNbBits;
}
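/* Editor's illustration (not part of this patch): the @post condition of
 * HUF_setMaxHeight() above is a saturated Kraft sum. Assuming the nodeElt
 * layout used in this file, it could be spot-checked as follows, with
 * `largestBits` being the value returned by HUF_setMaxHeight(): */
static int HUF_kraftSumIsExact(const nodeElt* huffNode, U32 lastNonNull, U32 largestBits)
{
    U64 sum = 0;   /* sum of 2^(largestBits - nbBits) over all live symbols */
    U32 n;
    for (n = 0; n <= lastNonNull; n++) {
        assert(huffNode[n].nbBits >= 1 && huffNode[n].nbBits <= largestBits);
        sum += (U64)1 << (largestBits - huffNode[n].nbBits);
    }
    return sum == ((U64)1 << largestBits);   /* exact fill <=> complete prefix tree */
}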
typedef struct {
@@ -429,7 +500,7 @@ typedef struct {
U16 curr;
} rankPos;
-typedef nodeElt huffNodeTable[HUF_CTABLE_WORKSPACE_SIZE_U32];
+typedef nodeElt huffNodeTable[2 * (HUF_SYMBOLVALUE_MAX + 1)];
/* Number of buckets available for HUF_sort() */
#define RANK_POSITION_TABLE_SIZE 192
@@ -448,8 +519,8 @@ typedef struct {
* Let buckets 166 to 192 represent all remaining counts up to RANK_POSITION_MAX_COUNT_LOG using log2 bucketing.
*/
#define RANK_POSITION_MAX_COUNT_LOG 32
-#define RANK_POSITION_LOG_BUCKETS_BEGIN (RANK_POSITION_TABLE_SIZE - 1) - RANK_POSITION_MAX_COUNT_LOG - 1 /* == 158 */
-#define RANK_POSITION_DISTINCT_COUNT_CUTOFF RANK_POSITION_LOG_BUCKETS_BEGIN + BIT_highbit32(RANK_POSITION_LOG_BUCKETS_BEGIN) /* == 166 */
+#define RANK_POSITION_LOG_BUCKETS_BEGIN ((RANK_POSITION_TABLE_SIZE - 1) - RANK_POSITION_MAX_COUNT_LOG - 1 /* == 158 */)
+#define RANK_POSITION_DISTINCT_COUNT_CUTOFF (RANK_POSITION_LOG_BUCKETS_BEGIN + ZSTD_highbit32(RANK_POSITION_LOG_BUCKETS_BEGIN) /* == 166 */)
/* Return the appropriate bucket index for a given count. See definition of
* RANK_POSITION_DISTINCT_COUNT_CUTOFF for explanation of bucketing strategy.
@@ -457,7 +528,7 @@ typedef struct {
static U32 HUF_getIndex(U32 const count) {
return (count < RANK_POSITION_DISTINCT_COUNT_CUTOFF)
? count
- : BIT_highbit32(count) + RANK_POSITION_LOG_BUCKETS_BEGIN;
+ : ZSTD_highbit32(count) + RANK_POSITION_LOG_BUCKETS_BEGIN;
}
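/* Editor's illustration (not part of this patch): with the constants above,
 * small counts keep a dedicated bucket while large counts share log2 buckets,
 * which bounds the table size without hurting sort quality for rare symbols. */
static void HUF_getIndex_example(void)
{
    assert(HUF_getIndex(3) == 3);                                      /* below the cutoff: distinct bucket */
    assert(HUF_getIndex(600) == HUF_getIndex(1000));                   /* both have highbit 9: same bucket */
    assert(HUF_getIndex(1000) == 9 + RANK_POSITION_LOG_BUCKETS_BEGIN); /* log2 bucket for counts in [512,1023] */
}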
/* Helper swap function for HUF_quickSortPartition() */
@@ -580,7 +651,7 @@ static void HUF_sort(nodeElt huffNode[], const unsigned count[], U32 const maxSy
/* Sort each bucket. */
for (n = RANK_POSITION_DISTINCT_COUNT_CUTOFF; n < RANK_POSITION_TABLE_SIZE - 1; ++n) {
- U32 const bucketSize = rankPosition[n].curr-rankPosition[n].base;
+ int const bucketSize = rankPosition[n].curr - rankPosition[n].base;
U32 const bucketStartIdx = rankPosition[n].base;
if (bucketSize > 1) {
assert(bucketStartIdx < maxSymbolValue1);
@@ -591,6 +662,7 @@ static void HUF_sort(nodeElt huffNode[], const unsigned count[], U32 const maxSy
assert(HUF_isSorted(huffNode, maxSymbolValue1));
}
+
/* HUF_buildCTable_wksp() :
* Same as HUF_buildCTable(), but using externally allocated scratch buffer.
* `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as sizeof(HUF_buildCTable_wksp_tables).
@@ -611,6 +683,7 @@ static int HUF_buildTree(nodeElt* huffNode, U32 maxSymbolValue)
int lowS, lowN;
int nodeNb = STARTNODE;
int n, nodeRoot;
+ DEBUGLOG(5, "HUF_buildTree (alphabet size = %u)", maxSymbolValue + 1);
/* init for parents */
nonNullRank = (int)maxSymbolValue;
while(huffNode[nonNullRank].count == 0) nonNullRank--;
@@ -637,6 +710,8 @@ static int HUF_buildTree(nodeElt* huffNode, U32 maxSymbolValue)
for (n=0; n<=nonNullRank; n++)
huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;
+ DEBUGLOG(6, "Initial distribution of bits completed (%zu sorted symbols)", showHNodeBits(huffNode, maxSymbolValue+1));
+
return nonNullRank;
}
@@ -671,31 +746,40 @@ static void HUF_buildCTableFromTree(HUF_CElt* CTable, nodeElt const* huffNode, i
HUF_setNbBits(ct + huffNode[n].byte, huffNode[n].nbBits); /* push nbBits per symbol, symbol order */
for (n=0; n<alphabetSize; n++)
HUF_setValue(ct + n, valPerRank[HUF_getNbBits(ct[n])]++); /* assign value within rank, symbol order */
- CTable[0] = maxNbBits;
+
+ HUF_writeCTableHeader(CTable, maxNbBits, maxSymbolValue);
}
-size_t HUF_buildCTable_wksp (HUF_CElt* CTable, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize)
+size_t
+HUF_buildCTable_wksp(HUF_CElt* CTable, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits,
+ void* workSpace, size_t wkspSize)
{
- HUF_buildCTable_wksp_tables* const wksp_tables = (HUF_buildCTable_wksp_tables*)HUF_alignUpWorkspace(workSpace, &wkspSize, ZSTD_ALIGNOF(U32));
+ HUF_buildCTable_wksp_tables* const wksp_tables =
+ (HUF_buildCTable_wksp_tables*)HUF_alignUpWorkspace(workSpace, &wkspSize, ZSTD_ALIGNOF(U32));
nodeElt* const huffNode0 = wksp_tables->huffNodeTbl;
nodeElt* const huffNode = huffNode0+1;
int nonNullRank;
+ HUF_STATIC_ASSERT(HUF_CTABLE_WORKSPACE_SIZE == sizeof(HUF_buildCTable_wksp_tables));
+
+ DEBUGLOG(5, "HUF_buildCTable_wksp (alphabet size = %u)", maxSymbolValue+1);
+
/* safety checks */
if (wkspSize < sizeof(HUF_buildCTable_wksp_tables))
- return ERROR(workSpace_tooSmall);
+ return ERROR(workSpace_tooSmall);
if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT;
if (maxSymbolValue > HUF_SYMBOLVALUE_MAX)
- return ERROR(maxSymbolValue_tooLarge);
+ return ERROR(maxSymbolValue_tooLarge);
ZSTD_memset(huffNode0, 0, sizeof(huffNodeTable));
/* sort, decreasing order */
HUF_sort(huffNode, count, maxSymbolValue, wksp_tables->rankPosition);
+ DEBUGLOG(6, "sorted symbols completed (%zu symbols)", showHNodeSymbols(huffNode, maxSymbolValue+1));
/* build tree */
nonNullRank = HUF_buildTree(huffNode, maxSymbolValue);
- /* enforce maxTableLog */
+ /* determine and enforce maxTableLog */
maxNbBits = HUF_setMaxHeight(huffNode, (U32)nonNullRank, maxNbBits);
if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC); /* check fit into table */
@@ -716,13 +800,20 @@ size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count,
}
int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) {
- HUF_CElt const* ct = CTable + 1;
- int bad = 0;
- int s;
- for (s = 0; s <= (int)maxSymbolValue; ++s) {
- bad |= (count[s] != 0) & (HUF_getNbBits(ct[s]) == 0);
- }
- return !bad;
+ HUF_CTableHeader header = HUF_readCTableHeader(CTable);
+ HUF_CElt const* ct = CTable + 1;
+ int bad = 0;
+ int s;
+
+ assert(header.tableLog <= HUF_TABLELOG_ABSOLUTEMAX);
+
+ if (header.maxSymbolValue < maxSymbolValue)
+ return 0;
+
+ for (s = 0; s <= (int)maxSymbolValue; ++s) {
+ bad |= (count[s] != 0) & (HUF_getNbBits(ct[s]) == 0);
+ }
+ return !bad;
}
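/* Editor's usage sketch (not part of this patch), mirroring the repeat-table
 * check performed later in HUF_compress_internal(): a previously built table
 * is only reused if it can encode every symbol observed in the new input. */
static void HUF_checkRepeatTable_example(const HUF_CElt* oldTable,
                                         const unsigned* count, unsigned maxSymbolValue,
                                         HUF_repeat* repeat)
{
    if (*repeat != HUF_repeat_none && !HUF_validateCTable(oldTable, count, maxSymbolValue))
        *repeat = HUF_repeat_none;   /* old table cannot represent this histogram */
}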
size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); }
@@ -804,7 +895,7 @@ FORCE_INLINE_TEMPLATE void HUF_addBits(HUF_CStream_t* bitC, HUF_CElt elt, int id
#if DEBUGLEVEL >= 1
{
size_t const nbBits = HUF_getNbBits(elt);
- size_t const dirtyBits = nbBits == 0 ? 0 : BIT_highbit32((U32)nbBits) + 1;
+ size_t const dirtyBits = nbBits == 0 ? 0 : ZSTD_highbit32((U32)nbBits) + 1;
(void)dirtyBits;
/* Middle bits are 0. */
assert(((elt >> dirtyBits) << (dirtyBits + nbBits)) == 0);
@@ -884,7 +975,7 @@ static size_t HUF_closeCStream(HUF_CStream_t* bitC)
{
size_t const nbBits = bitC->bitPos[0] & 0xFF;
if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */
- return (bitC->ptr - bitC->startPtr) + (nbBits > 0);
+ return (size_t)(bitC->ptr - bitC->startPtr) + (nbBits > 0);
}
}
@@ -964,17 +1055,17 @@ HUF_compress1X_usingCTable_internal_body(void* dst, size_t dstSize,
const void* src, size_t srcSize,
const HUF_CElt* CTable)
{
- U32 const tableLog = (U32)CTable[0];
+ U32 const tableLog = HUF_readCTableHeader(CTable).tableLog;
HUF_CElt const* ct = CTable + 1;
const BYTE* ip = (const BYTE*) src;
BYTE* const ostart = (BYTE*)dst;
BYTE* const oend = ostart + dstSize;
- BYTE* op = ostart;
HUF_CStream_t bitC;
/* init */
if (dstSize < 8) return 0; /* not enough space to compress */
- { size_t const initErr = HUF_initCStream(&bitC, op, (size_t)(oend-op));
+ { BYTE* op = ostart;
+ size_t const initErr = HUF_initCStream(&bitC, op, (size_t)(oend-op));
if (HUF_isError(initErr)) return 0; }
if (dstSize < HUF_tightCompressBound(srcSize, (size_t)tableLog) || tableLog > 11)
@@ -1045,9 +1136,9 @@ HUF_compress1X_usingCTable_internal_default(void* dst, size_t dstSize,
static size_t
HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
const void* src, size_t srcSize,
- const HUF_CElt* CTable, const int bmi2)
+ const HUF_CElt* CTable, const int flags)
{
- if (bmi2) {
+ if (flags & HUF_flags_bmi2) {
return HUF_compress1X_usingCTable_internal_bmi2(dst, dstSize, src, srcSize, CTable);
}
return HUF_compress1X_usingCTable_internal_default(dst, dstSize, src, srcSize, CTable);
@@ -1058,28 +1149,23 @@ HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
static size_t
HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
const void* src, size_t srcSize,
- const HUF_CElt* CTable, const int bmi2)
+ const HUF_CElt* CTable, const int flags)
{
- (void)bmi2;
+ (void)flags;
return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
}
#endif
-size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
+size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags)
{
- return HUF_compress1X_usingCTable_bmi2(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0);
-}
-
-size_t HUF_compress1X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2)
-{
- return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, bmi2);
+ return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, flags);
}
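/* Editor's illustration (not part of this patch): callers that used to pass a
 * bare `bmi2` int now pass a bit-set of HUF_flags_* values (from huf.h), e.g.: */
static size_t HUF_compress1X_withFlags_example(void* dst, size_t dstCapacity,
                                               const void* src, size_t srcSize,
                                               const HUF_CElt* CTable, int cpuHasBmi2)
{
    int flags = 0;
    if (cpuHasBmi2) flags |= HUF_flags_bmi2;   /* select the BMI2 code path when available */
    return HUF_compress1X_usingCTable(dst, dstCapacity, src, srcSize, CTable, flags);
}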
static size_t
HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
const void* src, size_t srcSize,
- const HUF_CElt* CTable, int bmi2)
+ const HUF_CElt* CTable, int flags)
{
size_t const segmentSize = (srcSize+3)/4; /* first 3 segments */
const BYTE* ip = (const BYTE*) src;
@@ -1093,7 +1179,7 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
op += 6; /* jumpTable */
assert(op <= oend);
- { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
+ { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, flags) );
if (cSize == 0 || cSize > 65535) return 0;
MEM_writeLE16(ostart, (U16)cSize);
op += cSize;
@@ -1101,7 +1187,7 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
ip += segmentSize;
assert(op <= oend);
- { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
+ { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, flags) );
if (cSize == 0 || cSize > 65535) return 0;
MEM_writeLE16(ostart+2, (U16)cSize);
op += cSize;
@@ -1109,7 +1195,7 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
ip += segmentSize;
assert(op <= oend);
- { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
+ { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, flags) );
if (cSize == 0 || cSize > 65535) return 0;
MEM_writeLE16(ostart+4, (U16)cSize);
op += cSize;
@@ -1118,7 +1204,7 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
ip += segmentSize;
assert(op <= oend);
assert(ip <= iend);
- { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, (size_t)(iend-ip), CTable, bmi2) );
+ { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, (size_t)(iend-ip), CTable, flags) );
if (cSize == 0 || cSize > 65535) return 0;
op += cSize;
}
@@ -1126,14 +1212,9 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
return (size_t)(op-ostart);
}
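/* Editor's note (not part of this patch): the output produced above follows the
 * standard zstd 4-stream layout; only three sizes are stored, the 4th is implied:
 *
 *   | cSize1 | cSize2 | cSize3 | stream1 (cSize1 B) | stream2 | stream3 | stream4 |
 *   | LE16   | LE16   | LE16   |                    |         |         |  rest   |
 *
 * which is why the jump table written at the start of dst is exactly 6 bytes. */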
-size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
-{
- return HUF_compress4X_usingCTable_bmi2(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0);
-}
-
-size_t HUF_compress4X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2)
+size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags)
{
- return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, bmi2);
+ return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, flags);
}
typedef enum { HUF_singleStream, HUF_fourStreams } HUF_nbStreams_e;
@@ -1141,11 +1222,11 @@ typedef enum { HUF_singleStream, HUF_fourStreams } HUF_nbStreams_e;
static size_t HUF_compressCTable_internal(
BYTE* const ostart, BYTE* op, BYTE* const oend,
const void* src, size_t srcSize,
- HUF_nbStreams_e nbStreams, const HUF_CElt* CTable, const int bmi2)
+ HUF_nbStreams_e nbStreams, const HUF_CElt* CTable, const int flags)
{
size_t const cSize = (nbStreams==HUF_singleStream) ?
- HUF_compress1X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, bmi2) :
- HUF_compress4X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, bmi2);
+ HUF_compress1X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, flags) :
+ HUF_compress4X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, flags);
if (HUF_isError(cSize)) { return cSize; }
if (cSize==0) { return 0; } /* uncompressible */
op += cSize;
@@ -1168,6 +1249,81 @@ typedef struct {
#define SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE 4096
#define SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO 10 /* Must be >= 2 */
+unsigned HUF_cardinality(const unsigned* count, unsigned maxSymbolValue)
+{
+ unsigned cardinality = 0;
+ unsigned i;
+
+ for (i = 0; i < maxSymbolValue + 1; i++) {
+ if (count[i] != 0) cardinality += 1;
+ }
+
+ return cardinality;
+}
+
+unsigned HUF_minTableLog(unsigned symbolCardinality)
+{
+ U32 minBitsSymbols = ZSTD_highbit32(symbolCardinality) + 1;
+ return minBitsSymbols;
+}
+
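/* Editor's worked example (not part of this patch): a block using 5 distinct
 * symbols gives ZSTD_highbit32(5) == 2, so HUF_minTableLog() returns 3;
 * a depth of 3 offers 2^3 == 8 leaves, enough for a prefix-free code over 5 symbols. */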
+unsigned HUF_optimalTableLog(
+ unsigned maxTableLog,
+ size_t srcSize,
+ unsigned maxSymbolValue,
+ void* workSpace, size_t wkspSize,
+ HUF_CElt* table,
+ const unsigned* count,
+ int flags)
+{
+ assert(srcSize > 1); /* Not supported, RLE should be used instead */
+ assert(wkspSize >= sizeof(HUF_buildCTable_wksp_tables));
+
+ if (!(flags & HUF_flags_optimalDepth)) {
+ /* cheap evaluation, based on FSE */
+ return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1);
+ }
+
+ { BYTE* dst = (BYTE*)workSpace + sizeof(HUF_WriteCTableWksp);
+ size_t dstSize = wkspSize - sizeof(HUF_WriteCTableWksp);
+ size_t hSize, newSize;
+ const unsigned symbolCardinality = HUF_cardinality(count, maxSymbolValue);
+ const unsigned minTableLog = HUF_minTableLog(symbolCardinality);
+ size_t optSize = ((size_t) ~0) - 1;
+ unsigned optLog = maxTableLog, optLogGuess;
+
+ DEBUGLOG(6, "HUF_optimalTableLog: probing huf depth (srcSize=%zu)", srcSize);
+
+ /* Search until size increases */
+ for (optLogGuess = minTableLog; optLogGuess <= maxTableLog; optLogGuess++) {
+ DEBUGLOG(7, "checking for huffLog=%u", optLogGuess);
+
+ { size_t maxBits = HUF_buildCTable_wksp(table, count, maxSymbolValue, optLogGuess, workSpace, wkspSize);
+ if (ERR_isError(maxBits)) continue;
+
+ if (maxBits < optLogGuess && optLogGuess > minTableLog) break;
+
+ hSize = HUF_writeCTable_wksp(dst, dstSize, table, maxSymbolValue, (U32)maxBits, workSpace, wkspSize);
+ }
+
+ if (ERR_isError(hSize)) continue;
+
+ newSize = HUF_estimateCompressedSize(table, count, maxSymbolValue) + hSize;
+
+ if (newSize > optSize + 1) {
+ break;
+ }
+
+ if (newSize < optSize) {
+ optSize = newSize;
+ optLog = optLogGuess;
+ }
+ }
+ assert(optLog <= HUF_TABLELOG_MAX);
+ return optLog;
+ }
+}
+
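/* Editor's note (not part of this patch): the probe above scores each candidate
 * depth as serialized-header size + estimated payload, starting from
 * HUF_minTableLog(cardinality). It stops once the score exceeds the best seen so
 * far by more than a byte, on the assumption that beyond that point extra depth
 * mostly grows the table description without shrinking the payload. */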
/* HUF_compress_internal() :
* `workSpace_align4` must be aligned on 4-bytes boundaries,
* and occupies the same space as a table of HUF_WORKSPACE_SIZE_U64 unsigned */
@@ -1177,14 +1333,14 @@ HUF_compress_internal (void* dst, size_t dstSize,
unsigned maxSymbolValue, unsigned huffLog,
HUF_nbStreams_e nbStreams,
void* workSpace, size_t wkspSize,
- HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat,
- const int bmi2, unsigned suspectUncompressible)
+ HUF_CElt* oldHufTable, HUF_repeat* repeat, int flags)
{
HUF_compress_tables_t* const table = (HUF_compress_tables_t*)HUF_alignUpWorkspace(workSpace, &wkspSize, ZSTD_ALIGNOF(size_t));
BYTE* const ostart = (BYTE*)dst;
BYTE* const oend = ostart + dstSize;
BYTE* op = ostart;
+ DEBUGLOG(5, "HUF_compress_internal (srcSize=%zu)", srcSize);
HUF_STATIC_ASSERT(sizeof(*table) + HUF_WORKSPACE_MAX_ALIGNMENT <= HUF_WORKSPACE_SIZE);
/* checks & inits */
@@ -1198,16 +1354,17 @@ HUF_compress_internal (void* dst, size_t dstSize,
if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT;
/* Heuristic : If old table is valid, use it for small inputs */
- if (preferRepeat && repeat && *repeat == HUF_repeat_valid) {
+ if ((flags & HUF_flags_preferRepeat) && repeat && *repeat == HUF_repeat_valid) {
return HUF_compressCTable_internal(ostart, op, oend,
src, srcSize,
- nbStreams, oldHufTable, bmi2);
+ nbStreams, oldHufTable, flags);
}
/* If uncompressible data is suspected, do a smaller sampling first */
DEBUG_STATIC_ASSERT(SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO >= 2);
- if (suspectUncompressible && srcSize >= (SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE * SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO)) {
+ if ((flags & HUF_flags_suspectUncompressible) && srcSize >= (SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE * SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO)) {
size_t largestTotal = 0;
+ DEBUGLOG(5, "input suspected incompressible : sampling to check");
{ unsigned maxSymbolValueBegin = maxSymbolValue;
CHECK_V_F(largestBegin, HIST_count_simple (table->count, &maxSymbolValueBegin, (const BYTE*)src, SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) );
largestTotal += largestBegin;
@@ -1224,6 +1381,7 @@ HUF_compress_internal (void* dst, size_t dstSize,
if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; } /* single symbol, rle */
if (largest <= (srcSize >> 7)+4) return 0; /* heuristic : probably not compressible enough */
}
+ DEBUGLOG(6, "histogram detail completed (%zu symbols)", showU32(table->count, maxSymbolValue+1));
/* Check validity of previous table */
if ( repeat
@@ -1232,25 +1390,20 @@ HUF_compress_internal (void* dst, size_t dstSize,
*repeat = HUF_repeat_none;
}
/* Heuristic : use existing table for small inputs */
- if (preferRepeat && repeat && *repeat != HUF_repeat_none) {
+ if ((flags & HUF_flags_preferRepeat) && repeat && *repeat != HUF_repeat_none) {
return HUF_compressCTable_internal(ostart, op, oend,
src, srcSize,
- nbStreams, oldHufTable, bmi2);
+ nbStreams, oldHufTable, flags);
}
/* Build Huffman Tree */
- huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
+ huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue, &table->wksps, sizeof(table->wksps), table->CTable, table->count, flags);
{ size_t const maxBits = HUF_buildCTable_wksp(table->CTable, table->count,
maxSymbolValue, huffLog,
&table->wksps.buildCTable_wksp, sizeof(table->wksps.buildCTable_wksp));
CHECK_F(maxBits);
huffLog = (U32)maxBits;
- }
- /* Zero unused symbols in CTable, so we can check it for validity */
- {
- size_t const ctableSize = HUF_CTABLE_SIZE_ST(maxSymbolValue);
- size_t const unusedSize = sizeof(table->CTable) - ctableSize * sizeof(HUF_CElt);
- ZSTD_memset(table->CTable + ctableSize, 0, unusedSize);
+ DEBUGLOG(6, "bit distribution completed (%zu symbols)", showCTableBits(table->CTable + 1, maxSymbolValue+1));
}
/* Write table description header */
@@ -1263,7 +1416,7 @@ HUF_compress_internal (void* dst, size_t dstSize,
if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) {
return HUF_compressCTable_internal(ostart, op, oend,
src, srcSize,
- nbStreams, oldHufTable, bmi2);
+ nbStreams, oldHufTable, flags);
} }
/* Use the new huffman table */
@@ -1275,61 +1428,35 @@ HUF_compress_internal (void* dst, size_t dstSize,
}
return HUF_compressCTable_internal(ostart, op, oend,
src, srcSize,
- nbStreams, table->CTable, bmi2);
-}
-
-
-size_t HUF_compress1X_wksp (void* dst, size_t dstSize,
- const void* src, size_t srcSize,
- unsigned maxSymbolValue, unsigned huffLog,
- void* workSpace, size_t wkspSize)
-{
- return HUF_compress_internal(dst, dstSize, src, srcSize,
- maxSymbolValue, huffLog, HUF_singleStream,
- workSpace, wkspSize,
- NULL, NULL, 0, 0 /*bmi2*/, 0);
+ nbStreams, table->CTable, flags);
}
size_t HUF_compress1X_repeat (void* dst, size_t dstSize,
const void* src, size_t srcSize,
unsigned maxSymbolValue, unsigned huffLog,
void* workSpace, size_t wkspSize,
- HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat,
- int bmi2, unsigned suspectUncompressible)
+ HUF_CElt* hufTable, HUF_repeat* repeat, int flags)
{
+ DEBUGLOG(5, "HUF_compress1X_repeat (srcSize = %zu)", srcSize);
return HUF_compress_internal(dst, dstSize, src, srcSize,
maxSymbolValue, huffLog, HUF_singleStream,
workSpace, wkspSize, hufTable,
- repeat, preferRepeat, bmi2, suspectUncompressible);
-}
-
-/* HUF_compress4X_repeat():
- * compress input using 4 streams.
- * provide workspace to generate compression tables */
-size_t HUF_compress4X_wksp (void* dst, size_t dstSize,
- const void* src, size_t srcSize,
- unsigned maxSymbolValue, unsigned huffLog,
- void* workSpace, size_t wkspSize)
-{
- return HUF_compress_internal(dst, dstSize, src, srcSize,
- maxSymbolValue, huffLog, HUF_fourStreams,
- workSpace, wkspSize,
- NULL, NULL, 0, 0 /*bmi2*/, 0);
+ repeat, flags);
}
/* HUF_compress4X_repeat():
* compress input using 4 streams.
* consider skipping quickly
- * re-use an existing huffman compression table */
+ * reuse an existing huffman compression table */
size_t HUF_compress4X_repeat (void* dst, size_t dstSize,
const void* src, size_t srcSize,
unsigned maxSymbolValue, unsigned huffLog,
void* workSpace, size_t wkspSize,
- HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2, unsigned suspectUncompressible)
+ HUF_CElt* hufTable, HUF_repeat* repeat, int flags)
{
+ DEBUGLOG(5, "HUF_compress4X_repeat (srcSize = %zu)", srcSize);
return HUF_compress_internal(dst, dstSize, src, srcSize,
maxSymbolValue, huffLog, HUF_fourStreams,
workSpace, wkspSize,
- hufTable, repeat, preferRepeat, bmi2, suspectUncompressible);
+ hufTable, repeat, flags);
}
-
diff --git a/lib/zstd/compress/zstd_compress.c b/lib/zstd/compress/zstd_compress.c
index 16bb995bc6c4..c41a747413e0 100644
--- a/lib/zstd/compress/zstd_compress.c
+++ b/lib/zstd/compress/zstd_compress.c
@@ -1,5 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -11,12 +12,13 @@
/*-*************************************
* Dependencies
***************************************/
+#include "../common/allocations.h" /* ZSTD_customMalloc, ZSTD_customCalloc, ZSTD_customFree */
#include "../common/zstd_deps.h" /* INT_MAX, ZSTD_memset, ZSTD_memcpy */
#include "../common/mem.h"
+#include "../common/error_private.h"
#include "hist.h" /* HIST_countFast_wksp */
#define FSE_STATIC_LINKING_ONLY /* FSE_encodeSymbol */
#include "../common/fse.h"
-#define HUF_STATIC_LINKING_ONLY
#include "../common/huf.h"
#include "zstd_compress_internal.h"
#include "zstd_compress_sequences.h"
@@ -27,6 +29,7 @@
#include "zstd_opt.h"
#include "zstd_ldm.h"
#include "zstd_compress_superblock.h"
+#include "../common/bits.h" /* ZSTD_highbit32, ZSTD_rotateRight_U64 */
/* ***************************************************************
* Tuning parameters
@@ -44,7 +47,7 @@
* in log format, aka 17 => 1 << 17 == 128Ki positions.
* This structure is only used in zstd_opt.
* Since allocation is centralized for all strategies, it has to be known here.
- * The actual (selected) size of the hash table is then stored in ZSTD_matchState_t.hashLog3,
+ * The actual (selected) size of the hash table is then stored in ZSTD_MatchState_t.hashLog3,
* so that zstd_opt.c doesn't need to know about this constant.
*/
#ifndef ZSTD_HASHLOG3_MAX
@@ -55,14 +58,17 @@
* Helper functions
***************************************/
/* ZSTD_compressBound()
- * Note that the result from this function is only compatible with the "normal"
- * full-block strategy.
- * When there are a lot of small blocks due to frequent flush in streaming mode
- * the overhead of headers can make the compressed data to be larger than the
- * return value of ZSTD_compressBound().
+ * Note that the result from this function is only valid for
+ * the one-pass compression functions.
+ * When employing the streaming mode,
+ * if flushes are frequently altering the size of blocks,
+ * the overhead from block headers can make the compressed data larger
+ * than the return value of ZSTD_compressBound().
*/
size_t ZSTD_compressBound(size_t srcSize) {
- return ZSTD_COMPRESSBOUND(srcSize);
+ size_t const r = ZSTD_COMPRESSBOUND(srcSize);
+ if (r==0) return ERROR(srcSize_wrong);
+ return r;
}
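/* Editor's usage sketch (not part of this patch): with this change an oversized
 * srcSize yields an error code rather than 0, so callers can test it directly
 * (assuming the public ZSTD_isError() helper): */
static int ZSTD_srcSizeIsCompressible_example(size_t srcSize)
{
    size_t const bound = ZSTD_compressBound(srcSize);
    return !ZSTD_isError(bound);   /* error <=> srcSize exceeds what one frame can hold */
}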
@@ -75,12 +81,12 @@ struct ZSTD_CDict_s {
ZSTD_dictContentType_e dictContentType; /* The dictContentType the CDict was created with */
U32* entropyWorkspace; /* entropy workspace of HUF_WORKSPACE_SIZE bytes */
ZSTD_cwksp workspace;
- ZSTD_matchState_t matchState;
+ ZSTD_MatchState_t matchState;
ZSTD_compressedBlockState_t cBlockState;
ZSTD_customMem customMem;
U32 dictID;
int compressionLevel; /* 0 indicates that advanced API was used to select CDict params */
- ZSTD_paramSwitch_e useRowMatchFinder; /* Indicates whether the CDict was created with params that would use
+ ZSTD_ParamSwitch_e useRowMatchFinder; /* Indicates whether the CDict was created with params that would use
* row-based matchfinder. Unless the cdict is reloaded, we will use
* the same greedy/lazy matchfinder at compression time.
*/
@@ -130,11 +136,12 @@ ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize)
ZSTD_cwksp_move(&cctx->workspace, &ws);
cctx->staticSize = workspaceSize;
- /* statically sized space. entropyWorkspace never moves (but prev/next block swap places) */
- if (!ZSTD_cwksp_check_available(&cctx->workspace, ENTROPY_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t))) return NULL;
+ /* statically sized space. tmpWorkspace never moves (but prev/next block swap places) */
+ if (!ZSTD_cwksp_check_available(&cctx->workspace, TMP_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t))) return NULL;
cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
cctx->blockState.nextCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
- cctx->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cctx->workspace, ENTROPY_WORKSPACE_SIZE);
+ cctx->tmpWorkspace = ZSTD_cwksp_reserve_object(&cctx->workspace, TMP_WORKSPACE_SIZE);
+ cctx->tmpWkspSize = TMP_WORKSPACE_SIZE;
cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
return cctx;
}
@@ -168,15 +175,13 @@ static void ZSTD_freeCCtxContent(ZSTD_CCtx* cctx)
size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
{
+ DEBUGLOG(3, "ZSTD_freeCCtx (address: %p)", (void*)cctx);
if (cctx==NULL) return 0; /* support free on NULL */
RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
"not compatible with static CCtx");
- {
- int cctxInWorkspace = ZSTD_cwksp_owns_buffer(&cctx->workspace, cctx);
+ { int cctxInWorkspace = ZSTD_cwksp_owns_buffer(&cctx->workspace, cctx);
ZSTD_freeCCtxContent(cctx);
- if (!cctxInWorkspace) {
- ZSTD_customFree(cctx, cctx->customMem);
- }
+ if (!cctxInWorkspace) ZSTD_customFree(cctx, cctx->customMem);
}
return 0;
}
@@ -205,7 +210,7 @@ size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs)
}
/* private API call, for dictBuilder only */
-const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); }
+const SeqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); }
/* Returns true if the strategy supports using a row based matchfinder */
static int ZSTD_rowMatchFinderSupported(const ZSTD_strategy strategy) {
@@ -215,32 +220,27 @@ static int ZSTD_rowMatchFinderSupported(const ZSTD_strategy strategy) {
/* Returns true if the strategy and useRowMatchFinder mode indicate that we will use the row based matchfinder
* for this compression.
*/
-static int ZSTD_rowMatchFinderUsed(const ZSTD_strategy strategy, const ZSTD_paramSwitch_e mode) {
+static int ZSTD_rowMatchFinderUsed(const ZSTD_strategy strategy, const ZSTD_ParamSwitch_e mode) {
assert(mode != ZSTD_ps_auto);
return ZSTD_rowMatchFinderSupported(strategy) && (mode == ZSTD_ps_enable);
}
/* Returns row matchfinder usage given an initial mode and cParams */
-static ZSTD_paramSwitch_e ZSTD_resolveRowMatchFinderMode(ZSTD_paramSwitch_e mode,
+static ZSTD_ParamSwitch_e ZSTD_resolveRowMatchFinderMode(ZSTD_ParamSwitch_e mode,
const ZSTD_compressionParameters* const cParams) {
-#if defined(ZSTD_ARCH_X86_SSE2) || defined(ZSTD_ARCH_ARM_NEON)
- int const kHasSIMD128 = 1;
-#else
- int const kHasSIMD128 = 0;
-#endif
+ /* The Linux Kernel does not use SIMD, and 128KB is a very common size, e.g. in BtrFS.
+ * The row match finder is slower for this size without SIMD, so disable it.
+ */
+ const unsigned kWindowLogLowerBound = 17;
if (mode != ZSTD_ps_auto) return mode; /* if requested enabled, but no SIMD, we still will use row matchfinder */
mode = ZSTD_ps_disable;
if (!ZSTD_rowMatchFinderSupported(cParams->strategy)) return mode;
- if (kHasSIMD128) {
- if (cParams->windowLog > 14) mode = ZSTD_ps_enable;
- } else {
- if (cParams->windowLog > 17) mode = ZSTD_ps_enable;
- }
+ if (cParams->windowLog > kWindowLogLowerBound) mode = ZSTD_ps_enable;
return mode;
}
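/* Editor's note (not part of this patch): with ZSTD_ps_auto and the kernel
 * heuristic above, a 128 KB window (windowLog == 17) keeps the classic chain
 * matchfinder, while windowLog >= 18 enables the row matchfinder for the
 * greedy/lazy strategies that support it. */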
/* Returns block splitter usage (generally speaking, when using slower/stronger compression modes) */
-static ZSTD_paramSwitch_e ZSTD_resolveBlockSplitterMode(ZSTD_paramSwitch_e mode,
+static ZSTD_ParamSwitch_e ZSTD_resolveBlockSplitterMode(ZSTD_ParamSwitch_e mode,
const ZSTD_compressionParameters* const cParams) {
if (mode != ZSTD_ps_auto) return mode;
return (cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 17) ? ZSTD_ps_enable : ZSTD_ps_disable;
@@ -248,7 +248,7 @@ static ZSTD_paramSwitch_e ZSTD_resolveBlockSplitterMode(ZSTD_paramSwitch_e mode,
/* Returns 1 if the arguments indicate that we should allocate a chainTable, 0 otherwise */
static int ZSTD_allocateChainTable(const ZSTD_strategy strategy,
- const ZSTD_paramSwitch_e useRowMatchFinder,
+ const ZSTD_ParamSwitch_e useRowMatchFinder,
const U32 forDDSDict) {
assert(useRowMatchFinder != ZSTD_ps_auto);
/* We always should allocate a chaintable if we are allocating a matchstate for a DDS dictionary matchstate.
@@ -257,16 +257,44 @@ static int ZSTD_allocateChainTable(const ZSTD_strategy strategy,
return forDDSDict || ((strategy != ZSTD_fast) && !ZSTD_rowMatchFinderUsed(strategy, useRowMatchFinder));
}
-/* Returns 1 if compression parameters are such that we should
+/* Returns ZSTD_ps_enable if compression parameters are such that we should
* enable long distance matching (wlog >= 27, strategy >= btopt).
- * Returns 0 otherwise.
+ * Returns ZSTD_ps_disable otherwise.
*/
-static ZSTD_paramSwitch_e ZSTD_resolveEnableLdm(ZSTD_paramSwitch_e mode,
+static ZSTD_ParamSwitch_e ZSTD_resolveEnableLdm(ZSTD_ParamSwitch_e mode,
const ZSTD_compressionParameters* const cParams) {
if (mode != ZSTD_ps_auto) return mode;
return (cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 27) ? ZSTD_ps_enable : ZSTD_ps_disable;
}
+static int ZSTD_resolveExternalSequenceValidation(int mode) {
+ return mode;
+}
+
+/* Resolves maxBlockSize to the default if no value is present. */
+static size_t ZSTD_resolveMaxBlockSize(size_t maxBlockSize) {
+ if (maxBlockSize == 0) {
+ return ZSTD_BLOCKSIZE_MAX;
+ } else {
+ return maxBlockSize;
+ }
+}
+
+static ZSTD_ParamSwitch_e ZSTD_resolveExternalRepcodeSearch(ZSTD_ParamSwitch_e value, int cLevel) {
+ if (value != ZSTD_ps_auto) return value;
+ if (cLevel < 10) {
+ return ZSTD_ps_disable;
+ } else {
+ return ZSTD_ps_enable;
+ }
+}
+
+/* Returns 1 if compression parameters are such that CDict hashtable and chaintable indices are tagged.
+ * If so, the tags need to be removed in ZSTD_resetCCtx_byCopyingCDict. */
+static int ZSTD_CDictIndicesAreTagged(const ZSTD_compressionParameters* const cParams) {
+ return cParams->strategy == ZSTD_fast || cParams->strategy == ZSTD_dfast;
+}
+
static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams(
ZSTD_compressionParameters cParams)
{
@@ -282,8 +310,12 @@ static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams(
assert(cctxParams.ldmParams.hashLog >= cctxParams.ldmParams.bucketSizeLog);
assert(cctxParams.ldmParams.hashRateLog < 32);
}
- cctxParams.useBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams.useBlockSplitter, &cParams);
+ cctxParams.postBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams.postBlockSplitter, &cParams);
cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams.useRowMatchFinder, &cParams);
+ cctxParams.validateSequences = ZSTD_resolveExternalSequenceValidation(cctxParams.validateSequences);
+ cctxParams.maxBlockSize = ZSTD_resolveMaxBlockSize(cctxParams.maxBlockSize);
+ cctxParams.searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch(cctxParams.searchForExternalRepcodes,
+ cctxParams.compressionLevel);
assert(!ZSTD_checkCParams(cParams));
return cctxParams;
}
@@ -329,10 +361,13 @@ size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel)
#define ZSTD_NO_CLEVEL 0
/*
- * Initializes the cctxParams from params and compressionLevel.
+ * Initializes `cctxParams` from `params` and `compressionLevel`.
* @param compressionLevel If params are derived from a compression level then that compression level, otherwise ZSTD_NO_CLEVEL.
*/
-static void ZSTD_CCtxParams_init_internal(ZSTD_CCtx_params* cctxParams, ZSTD_parameters const* params, int compressionLevel)
+static void
+ZSTD_CCtxParams_init_internal(ZSTD_CCtx_params* cctxParams,
+ const ZSTD_parameters* params,
+ int compressionLevel)
{
assert(!ZSTD_checkCParams(params->cParams));
ZSTD_memset(cctxParams, 0, sizeof(*cctxParams));
@@ -343,10 +378,13 @@ static void ZSTD_CCtxParams_init_internal(ZSTD_CCtx_params* cctxParams, ZSTD_par
*/
cctxParams->compressionLevel = compressionLevel;
cctxParams->useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams->useRowMatchFinder, &params->cParams);
- cctxParams->useBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams->useBlockSplitter, &params->cParams);
+ cctxParams->postBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams->postBlockSplitter, &params->cParams);
cctxParams->ldmParams.enableLdm = ZSTD_resolveEnableLdm(cctxParams->ldmParams.enableLdm, &params->cParams);
+ cctxParams->validateSequences = ZSTD_resolveExternalSequenceValidation(cctxParams->validateSequences);
+ cctxParams->maxBlockSize = ZSTD_resolveMaxBlockSize(cctxParams->maxBlockSize);
+ cctxParams->searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch(cctxParams->searchForExternalRepcodes, compressionLevel);
DEBUGLOG(4, "ZSTD_CCtxParams_init_internal: useRowMatchFinder=%d, useBlockSplitter=%d ldm=%d",
- cctxParams->useRowMatchFinder, cctxParams->useBlockSplitter, cctxParams->ldmParams.enableLdm);
+ cctxParams->useRowMatchFinder, cctxParams->postBlockSplitter, cctxParams->ldmParams.enableLdm);
}
size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params)
@@ -359,7 +397,7 @@ size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_paramete
/*
* Sets cctxParams' cParams and fParams from params, but otherwise leaves them alone.
- * @param param Validated zstd parameters.
+ * @param params Validated zstd parameters.
*/
static void ZSTD_CCtxParams_setZstdParams(
ZSTD_CCtx_params* cctxParams, const ZSTD_parameters* params)
@@ -455,8 +493,8 @@ ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param)
return bounds;
case ZSTD_c_enableLongDistanceMatching:
- bounds.lowerBound = 0;
- bounds.upperBound = 1;
+ bounds.lowerBound = (int)ZSTD_ps_auto;
+ bounds.upperBound = (int)ZSTD_ps_disable;
return bounds;
case ZSTD_c_ldmHashLog:
@@ -534,11 +572,16 @@ ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param)
bounds.upperBound = 1;
return bounds;
- case ZSTD_c_useBlockSplitter:
+ case ZSTD_c_splitAfterSequences:
bounds.lowerBound = (int)ZSTD_ps_auto;
bounds.upperBound = (int)ZSTD_ps_disable;
return bounds;
+ case ZSTD_c_blockSplitterLevel:
+ bounds.lowerBound = 0;
+ bounds.upperBound = ZSTD_BLOCKSPLITTER_LEVEL_MAX;
+ return bounds;
+
case ZSTD_c_useRowMatchFinder:
bounds.lowerBound = (int)ZSTD_ps_auto;
bounds.upperBound = (int)ZSTD_ps_disable;
@@ -549,6 +592,26 @@ ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param)
bounds.upperBound = 1;
return bounds;
+ case ZSTD_c_prefetchCDictTables:
+ bounds.lowerBound = (int)ZSTD_ps_auto;
+ bounds.upperBound = (int)ZSTD_ps_disable;
+ return bounds;
+
+ case ZSTD_c_enableSeqProducerFallback:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 1;
+ return bounds;
+
+ case ZSTD_c_maxBlockSize:
+ bounds.lowerBound = ZSTD_BLOCKSIZE_MAX_MIN;
+ bounds.upperBound = ZSTD_BLOCKSIZE_MAX;
+ return bounds;
+
+ case ZSTD_c_repcodeResolution:
+ bounds.lowerBound = (int)ZSTD_ps_auto;
+ bounds.upperBound = (int)ZSTD_ps_disable;
+ return bounds;
+
default:
bounds.error = ERROR(parameter_unsupported);
return bounds;
@@ -567,10 +630,11 @@ static size_t ZSTD_cParam_clampBounds(ZSTD_cParameter cParam, int* value)
return 0;
}
-#define BOUNDCHECK(cParam, val) { \
- RETURN_ERROR_IF(!ZSTD_cParam_withinBounds(cParam,val), \
- parameter_outOfBound, "Param out of bounds"); \
-}
+#define BOUNDCHECK(cParam, val) \
+ do { \
+ RETURN_ERROR_IF(!ZSTD_cParam_withinBounds(cParam,val), \
+ parameter_outOfBound, "Param out of bounds"); \
+ } while (0)
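/* Editor's note (not part of this patch): the do { } while (0) wrapper turns the
 * macro into a single statement, so brace-less call sites keep parsing as
 * intended and the `else` binds to the right `if`:
 *
 *     if (value != 0)
 *         BOUNDCHECK(ZSTD_c_windowLog, value);
 *     else
 *         usedDefault = 1;
 */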
static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)
@@ -584,6 +648,7 @@ static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)
case ZSTD_c_minMatch:
case ZSTD_c_targetLength:
case ZSTD_c_strategy:
+ case ZSTD_c_blockSplitterLevel:
return 1;
case ZSTD_c_format:
@@ -610,9 +675,13 @@ static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)
case ZSTD_c_stableOutBuffer:
case ZSTD_c_blockDelimiters:
case ZSTD_c_validateSequences:
- case ZSTD_c_useBlockSplitter:
+ case ZSTD_c_splitAfterSequences:
case ZSTD_c_useRowMatchFinder:
case ZSTD_c_deterministicRefPrefix:
+ case ZSTD_c_prefetchCDictTables:
+ case ZSTD_c_enableSeqProducerFallback:
+ case ZSTD_c_maxBlockSize:
+ case ZSTD_c_repcodeResolution:
default:
return 0;
}
@@ -625,7 +694,7 @@ size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value)
if (ZSTD_isUpdateAuthorized(param)) {
cctx->cParamsChanged = 1;
} else {
- RETURN_ERROR(stage_wrong, "can only set params in ctx init stage");
+ RETURN_ERROR(stage_wrong, "can only set params in cctx init stage");
} }
switch(param)
@@ -665,9 +734,14 @@ size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value)
case ZSTD_c_stableOutBuffer:
case ZSTD_c_blockDelimiters:
case ZSTD_c_validateSequences:
- case ZSTD_c_useBlockSplitter:
+ case ZSTD_c_splitAfterSequences:
+ case ZSTD_c_blockSplitterLevel:
case ZSTD_c_useRowMatchFinder:
case ZSTD_c_deterministicRefPrefix:
+ case ZSTD_c_prefetchCDictTables:
+ case ZSTD_c_enableSeqProducerFallback:
+ case ZSTD_c_maxBlockSize:
+ case ZSTD_c_repcodeResolution:
break;
default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
@@ -723,12 +797,12 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
case ZSTD_c_minMatch :
if (value!=0) /* 0 => use default */
BOUNDCHECK(ZSTD_c_minMatch, value);
- CCtxParams->cParams.minMatch = value;
+ CCtxParams->cParams.minMatch = (U32)value;
return CCtxParams->cParams.minMatch;
case ZSTD_c_targetLength :
BOUNDCHECK(ZSTD_c_targetLength, value);
- CCtxParams->cParams.targetLength = value;
+ CCtxParams->cParams.targetLength = (U32)value;
return CCtxParams->cParams.targetLength;
case ZSTD_c_strategy :
@@ -741,12 +815,12 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
/* Content size written in frame header _when known_ (default:1) */
DEBUGLOG(4, "set content size flag = %u", (value!=0));
CCtxParams->fParams.contentSizeFlag = value != 0;
- return CCtxParams->fParams.contentSizeFlag;
+ return (size_t)CCtxParams->fParams.contentSizeFlag;
case ZSTD_c_checksumFlag :
/* A 32-bits content checksum will be calculated and written at end of frame (default:0) */
CCtxParams->fParams.checksumFlag = value != 0;
- return CCtxParams->fParams.checksumFlag;
+ return (size_t)CCtxParams->fParams.checksumFlag;
case ZSTD_c_dictIDFlag : /* When applicable, dictionary's dictID is provided in frame header (default:1) */
DEBUGLOG(4, "set dictIDFlag = %u", (value!=0));
@@ -755,18 +829,18 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
case ZSTD_c_forceMaxWindow :
CCtxParams->forceWindow = (value != 0);
- return CCtxParams->forceWindow;
+ return (size_t)CCtxParams->forceWindow;
case ZSTD_c_forceAttachDict : {
const ZSTD_dictAttachPref_e pref = (ZSTD_dictAttachPref_e)value;
- BOUNDCHECK(ZSTD_c_forceAttachDict, pref);
+ BOUNDCHECK(ZSTD_c_forceAttachDict, (int)pref);
CCtxParams->attachDictPref = pref;
return CCtxParams->attachDictPref;
}
case ZSTD_c_literalCompressionMode : {
- const ZSTD_paramSwitch_e lcm = (ZSTD_paramSwitch_e)value;
- BOUNDCHECK(ZSTD_c_literalCompressionMode, lcm);
+ const ZSTD_ParamSwitch_e lcm = (ZSTD_ParamSwitch_e)value;
+ BOUNDCHECK(ZSTD_c_literalCompressionMode, (int)lcm);
CCtxParams->literalCompressionMode = lcm;
return CCtxParams->literalCompressionMode;
}
@@ -789,47 +863,50 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
case ZSTD_c_enableDedicatedDictSearch :
CCtxParams->enableDedicatedDictSearch = (value!=0);
- return CCtxParams->enableDedicatedDictSearch;
+ return (size_t)CCtxParams->enableDedicatedDictSearch;
case ZSTD_c_enableLongDistanceMatching :
- CCtxParams->ldmParams.enableLdm = (ZSTD_paramSwitch_e)value;
+ BOUNDCHECK(ZSTD_c_enableLongDistanceMatching, value);
+ CCtxParams->ldmParams.enableLdm = (ZSTD_ParamSwitch_e)value;
return CCtxParams->ldmParams.enableLdm;
case ZSTD_c_ldmHashLog :
if (value!=0) /* 0 ==> auto */
BOUNDCHECK(ZSTD_c_ldmHashLog, value);
- CCtxParams->ldmParams.hashLog = value;
+ CCtxParams->ldmParams.hashLog = (U32)value;
return CCtxParams->ldmParams.hashLog;
case ZSTD_c_ldmMinMatch :
if (value!=0) /* 0 ==> default */
BOUNDCHECK(ZSTD_c_ldmMinMatch, value);
- CCtxParams->ldmParams.minMatchLength = value;
+ CCtxParams->ldmParams.minMatchLength = (U32)value;
return CCtxParams->ldmParams.minMatchLength;
case ZSTD_c_ldmBucketSizeLog :
if (value!=0) /* 0 ==> default */
BOUNDCHECK(ZSTD_c_ldmBucketSizeLog, value);
- CCtxParams->ldmParams.bucketSizeLog = value;
+ CCtxParams->ldmParams.bucketSizeLog = (U32)value;
return CCtxParams->ldmParams.bucketSizeLog;
case ZSTD_c_ldmHashRateLog :
if (value!=0) /* 0 ==> default */
BOUNDCHECK(ZSTD_c_ldmHashRateLog, value);
- CCtxParams->ldmParams.hashRateLog = value;
+ CCtxParams->ldmParams.hashRateLog = (U32)value;
return CCtxParams->ldmParams.hashRateLog;
case ZSTD_c_targetCBlockSize :
- if (value!=0) /* 0 ==> default */
+ if (value!=0) { /* 0 ==> default */
+ value = MAX(value, ZSTD_TARGETCBLOCKSIZE_MIN);
BOUNDCHECK(ZSTD_c_targetCBlockSize, value);
- CCtxParams->targetCBlockSize = value;
+ }
+ CCtxParams->targetCBlockSize = (U32)value;
return CCtxParams->targetCBlockSize;
case ZSTD_c_srcSizeHint :
if (value!=0) /* 0 ==> default */
BOUNDCHECK(ZSTD_c_srcSizeHint, value);
CCtxParams->srcSizeHint = value;
- return CCtxParams->srcSizeHint;
+ return (size_t)CCtxParams->srcSizeHint;
case ZSTD_c_stableInBuffer:
BOUNDCHECK(ZSTD_c_stableInBuffer, value);
@@ -843,28 +920,55 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
case ZSTD_c_blockDelimiters:
BOUNDCHECK(ZSTD_c_blockDelimiters, value);
- CCtxParams->blockDelimiters = (ZSTD_sequenceFormat_e)value;
+ CCtxParams->blockDelimiters = (ZSTD_SequenceFormat_e)value;
return CCtxParams->blockDelimiters;
case ZSTD_c_validateSequences:
BOUNDCHECK(ZSTD_c_validateSequences, value);
CCtxParams->validateSequences = value;
- return CCtxParams->validateSequences;
+ return (size_t)CCtxParams->validateSequences;
+
+ case ZSTD_c_splitAfterSequences:
+ BOUNDCHECK(ZSTD_c_splitAfterSequences, value);
+ CCtxParams->postBlockSplitter = (ZSTD_ParamSwitch_e)value;
+ return CCtxParams->postBlockSplitter;
- case ZSTD_c_useBlockSplitter:
- BOUNDCHECK(ZSTD_c_useBlockSplitter, value);
- CCtxParams->useBlockSplitter = (ZSTD_paramSwitch_e)value;
- return CCtxParams->useBlockSplitter;
+ case ZSTD_c_blockSplitterLevel:
+ BOUNDCHECK(ZSTD_c_blockSplitterLevel, value);
+ CCtxParams->preBlockSplitter_level = value;
+ return (size_t)CCtxParams->preBlockSplitter_level;
case ZSTD_c_useRowMatchFinder:
BOUNDCHECK(ZSTD_c_useRowMatchFinder, value);
- CCtxParams->useRowMatchFinder = (ZSTD_paramSwitch_e)value;
+ CCtxParams->useRowMatchFinder = (ZSTD_ParamSwitch_e)value;
return CCtxParams->useRowMatchFinder;
case ZSTD_c_deterministicRefPrefix:
BOUNDCHECK(ZSTD_c_deterministicRefPrefix, value);
CCtxParams->deterministicRefPrefix = !!value;
- return CCtxParams->deterministicRefPrefix;
+ return (size_t)CCtxParams->deterministicRefPrefix;
+
+ case ZSTD_c_prefetchCDictTables:
+ BOUNDCHECK(ZSTD_c_prefetchCDictTables, value);
+ CCtxParams->prefetchCDictTables = (ZSTD_ParamSwitch_e)value;
+ return CCtxParams->prefetchCDictTables;
+
+ case ZSTD_c_enableSeqProducerFallback:
+ BOUNDCHECK(ZSTD_c_enableSeqProducerFallback, value);
+ CCtxParams->enableMatchFinderFallback = value;
+ return (size_t)CCtxParams->enableMatchFinderFallback;
+
+ case ZSTD_c_maxBlockSize:
+ if (value!=0) /* 0 ==> default */
+ BOUNDCHECK(ZSTD_c_maxBlockSize, value);
+ assert(value>=0);
+ CCtxParams->maxBlockSize = (size_t)value;
+ return CCtxParams->maxBlockSize;
+
+ case ZSTD_c_repcodeResolution:
+ BOUNDCHECK(ZSTD_c_repcodeResolution, value);
+ CCtxParams->searchForExternalRepcodes = (ZSTD_ParamSwitch_e)value;
+ return CCtxParams->searchForExternalRepcodes;
default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
}
@@ -881,7 +985,7 @@ size_t ZSTD_CCtxParams_getParameter(
switch(param)
{
case ZSTD_c_format :
- *value = CCtxParams->format;
+ *value = (int)CCtxParams->format;
break;
case ZSTD_c_compressionLevel :
*value = CCtxParams->compressionLevel;
@@ -896,16 +1000,16 @@ size_t ZSTD_CCtxParams_getParameter(
*value = (int)CCtxParams->cParams.chainLog;
break;
case ZSTD_c_searchLog :
- *value = CCtxParams->cParams.searchLog;
+ *value = (int)CCtxParams->cParams.searchLog;
break;
case ZSTD_c_minMatch :
- *value = CCtxParams->cParams.minMatch;
+ *value = (int)CCtxParams->cParams.minMatch;
break;
case ZSTD_c_targetLength :
- *value = CCtxParams->cParams.targetLength;
+ *value = (int)CCtxParams->cParams.targetLength;
break;
case ZSTD_c_strategy :
- *value = (unsigned)CCtxParams->cParams.strategy;
+ *value = (int)CCtxParams->cParams.strategy;
break;
case ZSTD_c_contentSizeFlag :
*value = CCtxParams->fParams.contentSizeFlag;
@@ -920,10 +1024,10 @@ size_t ZSTD_CCtxParams_getParameter(
*value = CCtxParams->forceWindow;
break;
case ZSTD_c_forceAttachDict :
- *value = CCtxParams->attachDictPref;
+ *value = (int)CCtxParams->attachDictPref;
break;
case ZSTD_c_literalCompressionMode :
- *value = CCtxParams->literalCompressionMode;
+ *value = (int)CCtxParams->literalCompressionMode;
break;
case ZSTD_c_nbWorkers :
assert(CCtxParams->nbWorkers == 0);
@@ -939,19 +1043,19 @@ size_t ZSTD_CCtxParams_getParameter(
*value = CCtxParams->enableDedicatedDictSearch;
break;
case ZSTD_c_enableLongDistanceMatching :
- *value = CCtxParams->ldmParams.enableLdm;
+ *value = (int)CCtxParams->ldmParams.enableLdm;
break;
case ZSTD_c_ldmHashLog :
- *value = CCtxParams->ldmParams.hashLog;
+ *value = (int)CCtxParams->ldmParams.hashLog;
break;
case ZSTD_c_ldmMinMatch :
- *value = CCtxParams->ldmParams.minMatchLength;
+ *value = (int)CCtxParams->ldmParams.minMatchLength;
break;
case ZSTD_c_ldmBucketSizeLog :
- *value = CCtxParams->ldmParams.bucketSizeLog;
+ *value = (int)CCtxParams->ldmParams.bucketSizeLog;
break;
case ZSTD_c_ldmHashRateLog :
- *value = CCtxParams->ldmParams.hashRateLog;
+ *value = (int)CCtxParams->ldmParams.hashRateLog;
break;
case ZSTD_c_targetCBlockSize :
*value = (int)CCtxParams->targetCBlockSize;
@@ -971,8 +1075,11 @@ size_t ZSTD_CCtxParams_getParameter(
case ZSTD_c_validateSequences :
*value = (int)CCtxParams->validateSequences;
break;
- case ZSTD_c_useBlockSplitter :
- *value = (int)CCtxParams->useBlockSplitter;
+ case ZSTD_c_splitAfterSequences :
+ *value = (int)CCtxParams->postBlockSplitter;
+ break;
+ case ZSTD_c_blockSplitterLevel :
+ *value = CCtxParams->preBlockSplitter_level;
break;
case ZSTD_c_useRowMatchFinder :
*value = (int)CCtxParams->useRowMatchFinder;
@@ -980,6 +1087,18 @@ size_t ZSTD_CCtxParams_getParameter(
case ZSTD_c_deterministicRefPrefix:
*value = (int)CCtxParams->deterministicRefPrefix;
break;
+ case ZSTD_c_prefetchCDictTables:
+ *value = (int)CCtxParams->prefetchCDictTables;
+ break;
+ case ZSTD_c_enableSeqProducerFallback:
+ *value = CCtxParams->enableMatchFinderFallback;
+ break;
+ case ZSTD_c_maxBlockSize:
+ *value = (int)CCtxParams->maxBlockSize;
+ break;
+ case ZSTD_c_repcodeResolution:
+ *value = (int)CCtxParams->searchForExternalRepcodes;
+ break;
default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
}
return 0;
@@ -1006,9 +1125,47 @@ size_t ZSTD_CCtx_setParametersUsingCCtxParams(
return 0;
}
+size_t ZSTD_CCtx_setCParams(ZSTD_CCtx* cctx, ZSTD_compressionParameters cparams)
+{
+ ZSTD_STATIC_ASSERT(sizeof(cparams) == 7 * 4 /* all params are listed below */);
+ DEBUGLOG(4, "ZSTD_CCtx_setCParams");
+ /* only update if all parameters are valid */
+ FORWARD_IF_ERROR(ZSTD_checkCParams(cparams), "");
+ FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_windowLog, (int)cparams.windowLog), "");
+ FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_chainLog, (int)cparams.chainLog), "");
+ FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_hashLog, (int)cparams.hashLog), "");
+ FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_searchLog, (int)cparams.searchLog), "");
+ FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_minMatch, (int)cparams.minMatch), "");
+ FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_targetLength, (int)cparams.targetLength), "");
+ FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_strategy, (int)cparams.strategy), "");
+ return 0;
+}
+
+size_t ZSTD_CCtx_setFParams(ZSTD_CCtx* cctx, ZSTD_frameParameters fparams)
+{
+ ZSTD_STATIC_ASSERT(sizeof(fparams) == 3 * 4 /* all params are listed below */);
+ DEBUGLOG(4, "ZSTD_CCtx_setFParams");
+ FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_contentSizeFlag, fparams.contentSizeFlag != 0), "");
+ FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, fparams.checksumFlag != 0), "");
+ FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_dictIDFlag, fparams.noDictIDFlag == 0), "");
+ return 0;
+}
+
+size_t ZSTD_CCtx_setParams(ZSTD_CCtx* cctx, ZSTD_parameters params)
+{
+ DEBUGLOG(4, "ZSTD_CCtx_setParams");
+ /* First check cParams, because we want to update all or none. */
+ FORWARD_IF_ERROR(ZSTD_checkCParams(params.cParams), "");
+ /* Next set fParams, because this could fail if the cctx isn't in init stage. */
+ FORWARD_IF_ERROR(ZSTD_CCtx_setFParams(cctx, params.fParams), "");
+ /* Finally set cParams, which should succeed. */
+ FORWARD_IF_ERROR(ZSTD_CCtx_setCParams(cctx, params.cParams), "");
+ return 0;
+}
+
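/* Editor's usage sketch (not part of this patch): apply a complete parameter set
 * derived from a compression level in one call (assuming the public
 * ZSTD_getParams() helper): */
static size_t ZSTD_applyLevelParams_example(ZSTD_CCtx* cctx, int level,
                                            unsigned long long srcSizeHint)
{
    ZSTD_parameters const params = ZSTD_getParams(level, srcSizeHint, 0 /* no dict */);
    return ZSTD_CCtx_setParams(cctx, params);   /* checks cParams, then sets fParams + cParams */
}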
size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize)
{
- DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %u bytes", (U32)pledgedSrcSize);
+ DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %llu bytes", pledgedSrcSize);
RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
"Can't set pledgedSrcSize when not in init stage.");
cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
@@ -1024,9 +1181,9 @@ static void ZSTD_dedicatedDictSearch_revertCParams(
ZSTD_compressionParameters* cParams);
/*
- * Initializes the local dict using the requested parameters.
- * NOTE: This does not use the pledged src size, because it may be used for more
- * than one compression.
+ * Initializes the local dictionary using requested parameters.
+ * NOTE: Initialization does not employ the pledged src size,
+ * because the dictionary may be used for multiple compressions.
*/
static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx)
{
@@ -1039,8 +1196,8 @@ static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx)
return 0;
}
if (dl->cdict != NULL) {
- assert(cctx->cdict == dl->cdict);
/* Local dictionary already initialized. */
+ assert(cctx->cdict == dl->cdict);
return 0;
}
assert(dl->dictSize > 0);
@@ -1060,26 +1217,30 @@ static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx)
}
size_t ZSTD_CCtx_loadDictionary_advanced(
- ZSTD_CCtx* cctx, const void* dict, size_t dictSize,
- ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType)
+ ZSTD_CCtx* cctx,
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType)
{
- RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
- "Can't load a dictionary when ctx is not in init stage.");
DEBUGLOG(4, "ZSTD_CCtx_loadDictionary_advanced (size: %u)", (U32)dictSize);
- ZSTD_clearAllDicts(cctx); /* in case one already exists */
- if (dict == NULL || dictSize == 0) /* no dictionary mode */
+ RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
+ "Can't load a dictionary when cctx is not in init stage.");
+ ZSTD_clearAllDicts(cctx); /* erase any previously set dictionary */
+ if (dict == NULL || dictSize == 0) /* no dictionary */
return 0;
if (dictLoadMethod == ZSTD_dlm_byRef) {
cctx->localDict.dict = dict;
} else {
+ /* copy dictionary content inside CCtx to own its lifetime */
void* dictBuffer;
RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
- "no malloc for static CCtx");
+ "static CCtx can't allocate for an internal copy of dictionary");
dictBuffer = ZSTD_customMalloc(dictSize, cctx->customMem);
- RETURN_ERROR_IF(!dictBuffer, memory_allocation, "NULL pointer!");
+ RETURN_ERROR_IF(dictBuffer==NULL, memory_allocation,
+ "allocation failed for dictionary content");
ZSTD_memcpy(dictBuffer, dict, dictSize);
- cctx->localDict.dictBuffer = dictBuffer;
- cctx->localDict.dict = dictBuffer;
+ cctx->localDict.dictBuffer = dictBuffer; /* owned ptr to free */
+ cctx->localDict.dict = dictBuffer; /* read-only reference */
}
cctx->localDict.dictSize = dictSize;
cctx->localDict.dictContentType = dictContentType;
@@ -1149,7 +1310,7 @@ size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset)
if ( (reset == ZSTD_reset_parameters)
|| (reset == ZSTD_reset_session_and_parameters) ) {
RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
- "Can't reset parameters only when not in init stage.");
+ "Reset parameters is only possible during init stage.");
ZSTD_clearAllDicts(cctx);
return ZSTD_CCtxParams_reset(&cctx->requestedParams);
}
@@ -1168,7 +1329,7 @@ size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
BOUNDCHECK(ZSTD_c_searchLog, (int)cParams.searchLog);
BOUNDCHECK(ZSTD_c_minMatch, (int)cParams.minMatch);
BOUNDCHECK(ZSTD_c_targetLength,(int)cParams.targetLength);
- BOUNDCHECK(ZSTD_c_strategy, cParams.strategy);
+ BOUNDCHECK(ZSTD_c_strategy, (int)cParams.strategy);
return 0;
}
@@ -1178,11 +1339,12 @@ size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
static ZSTD_compressionParameters
ZSTD_clampCParams(ZSTD_compressionParameters cParams)
{
-# define CLAMP_TYPE(cParam, val, type) { \
- ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam); \
- if ((int)val<bounds.lowerBound) val=(type)bounds.lowerBound; \
- else if ((int)val>bounds.upperBound) val=(type)bounds.upperBound; \
- }
+# define CLAMP_TYPE(cParam, val, type) \
+ do { \
+ ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam); \
+ if ((int)val<bounds.lowerBound) val=(type)bounds.lowerBound; \
+ else if ((int)val>bounds.upperBound) val=(type)bounds.upperBound; \
+ } while (0)
# define CLAMP(cParam, val) CLAMP_TYPE(cParam, val, unsigned)
CLAMP(ZSTD_c_windowLog, cParams.windowLog);
CLAMP(ZSTD_c_chainLog, cParams.chainLog);
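/* Why CLAMP_TYPE is wrapped in do { } while (0) above: the wrapper makes a
 * multi-statement macro expand to exactly one statement, so it composes with
 * an unbraced if/else. Minimal standalone sketch (simplified clamp,
 * illustrative names only); with a bare { } block instead, the semicolon
 * after the macro call would detach the else and break compilation. */
#define CLAMP_SKETCH(v, lo, hi)                 \
    do {                                        \
        if ((v) < (lo)) (v) = (lo);             \
        else if ((v) > (hi)) (v) = (hi);        \
    } while (0)

static int clampedOrDefault(int v, int useClamp)
{
    if (useClamp)
        CLAMP_SKETCH(v, 10, 20);   /* expands to a single statement */
    else
        v = 0;
    return v;
}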
@@ -1240,19 +1402,62 @@ static U32 ZSTD_dictAndWindowLog(U32 windowLog, U64 srcSize, U64 dictSize)
* optimize `cPar` for a specified input (`srcSize` and `dictSize`).
* mostly downsize to reduce memory consumption and initialization latency.
* `srcSize` can be ZSTD_CONTENTSIZE_UNKNOWN when not known.
- * `mode` is the mode for parameter adjustment. See docs for `ZSTD_cParamMode_e`.
+ * `mode` is the mode for parameter adjustment. See docs for `ZSTD_CParamMode_e`.
* note : `srcSize==0` means 0!
* condition : cPar is presumed validated (can be checked using ZSTD_checkCParams()). */
static ZSTD_compressionParameters
ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
unsigned long long srcSize,
size_t dictSize,
- ZSTD_cParamMode_e mode)
+ ZSTD_CParamMode_e mode,
+ ZSTD_ParamSwitch_e useRowMatchFinder)
{
const U64 minSrcSize = 513; /* (1<<9) + 1 */
const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1);
assert(ZSTD_checkCParams(cPar)==0);
+ /* Cascade the selected strategy down to the next-highest one built into
+ * this binary. */
+#ifdef ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR
+ if (cPar.strategy == ZSTD_btultra2) {
+ cPar.strategy = ZSTD_btultra;
+ }
+ if (cPar.strategy == ZSTD_btultra) {
+ cPar.strategy = ZSTD_btopt;
+ }
+#endif
+#ifdef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR
+ if (cPar.strategy == ZSTD_btopt) {
+ cPar.strategy = ZSTD_btlazy2;
+ }
+#endif
+#ifdef ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR
+ if (cPar.strategy == ZSTD_btlazy2) {
+ cPar.strategy = ZSTD_lazy2;
+ }
+#endif
+#ifdef ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR
+ if (cPar.strategy == ZSTD_lazy2) {
+ cPar.strategy = ZSTD_lazy;
+ }
+#endif
+#ifdef ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR
+ if (cPar.strategy == ZSTD_lazy) {
+ cPar.strategy = ZSTD_greedy;
+ }
+#endif
+#ifdef ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR
+ if (cPar.strategy == ZSTD_greedy) {
+ cPar.strategy = ZSTD_dfast;
+ }
+#endif
+#ifdef ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR
+ if (cPar.strategy == ZSTD_dfast) {
+ cPar.strategy = ZSTD_fast;
+ cPar.targetLength = 0;
+ }
+#endif
+
switch (mode) {
case ZSTD_cpm_unknown:
case ZSTD_cpm_noAttachDict:
@@ -1281,8 +1486,8 @@ ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
}
/* resize windowLog if input is small enough, to use less memory */
- if ( (srcSize < maxWindowResize)
- && (dictSize < maxWindowResize) ) {
+ if ( (srcSize <= maxWindowResize)
+ && (dictSize <= maxWindowResize) ) {
U32 const tSize = (U32)(srcSize + dictSize);
static U32 const hashSizeMin = 1 << ZSTD_HASHLOG_MIN;
U32 const srcLog = (tSize < hashSizeMin) ? ZSTD_HASHLOG_MIN :
@@ -1300,6 +1505,42 @@ ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN)
cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* minimum wlog required for valid frame header */
+ /* We can't use more than 32 bits of hash in total, so that means that we require:
+ * (hashLog + 8) <= 32 && (chainLog + 8) <= 32
+ */
+ if (mode == ZSTD_cpm_createCDict && ZSTD_CDictIndicesAreTagged(&cPar)) {
+ U32 const maxShortCacheHashLog = 32 - ZSTD_SHORT_CACHE_TAG_BITS;
+ if (cPar.hashLog > maxShortCacheHashLog) {
+ cPar.hashLog = maxShortCacheHashLog;
+ }
+ if (cPar.chainLog > maxShortCacheHashLog) {
+ cPar.chainLog = maxShortCacheHashLog;
+ }
+ }
+
+
+ /* At this point, we aren't 100% sure if we are using the row match finder.
+ * Unless it is explicitly disabled, conservatively assume that it is enabled.
+ * In this case it will only be disabled for small sources, so shrinking the
+ * hash log a little bit shouldn't result in any ratio loss.
+ */
+ if (useRowMatchFinder == ZSTD_ps_auto)
+ useRowMatchFinder = ZSTD_ps_enable;
+
+ /* We can't hash more than 32-bits in total. So that means that we require:
+ * (hashLog - rowLog + 8) <= 32
+ */
+ if (ZSTD_rowMatchFinderUsed(cPar.strategy, useRowMatchFinder)) {
+ /* Switch to 32-entry rows if searchLog is 5 (or more) */
+ U32 const rowLog = BOUNDED(4, cPar.searchLog, 6);
+ U32 const maxRowHashLog = 32 - ZSTD_ROW_HASH_TAG_BITS;
+ U32 const maxHashLog = maxRowHashLog + rowLog;
+ assert(cPar.hashLog >= rowLog);
+ if (cPar.hashLog > maxHashLog) {
+ cPar.hashLog = maxHashLog;
+ }
+ }
+
return cPar;
}
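/* Worked sketch of the row-hash bound applied above (standalone helpers, not
 * the zstd internals; the 8-bit tag width is an assumption standing in for
 * ZSTD_ROW_HASH_TAG_BITS): since the tag shares the 32-bit hash with the row
 * index, hashLog may not exceed (32 - tagBits) + rowLog. */
static unsigned boundedU32(unsigned lo, unsigned v, unsigned hi)
{
    return v < lo ? lo : (v > hi ? hi : v);
}

static unsigned clampHashLogForRowMode(unsigned hashLog, unsigned searchLog)
{
    unsigned const tagBits = 8;                           /* assumption: ZSTD_ROW_HASH_TAG_BITS */
    unsigned const rowLog  = boundedU32(4, searchLog, 6); /* 16-, 32- or 64-entry rows */
    unsigned const maxHashLog = (32 - tagBits) + rowLog;
    /* e.g. searchLog=7 -> rowLog=6 -> maxHashLog=30, so hashLog=31 is clamped to 30 */
    return hashLog > maxHashLog ? maxHashLog : hashLog;
}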
@@ -1310,11 +1551,11 @@ ZSTD_adjustCParams(ZSTD_compressionParameters cPar,
{
cPar = ZSTD_clampCParams(cPar); /* resulting cPar is necessarily valid (all parameters within range) */
if (srcSize == 0) srcSize = ZSTD_CONTENTSIZE_UNKNOWN;
- return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize, ZSTD_cpm_unknown);
+ return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize, ZSTD_cpm_unknown, ZSTD_ps_auto);
}
-static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
-static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
+static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode);
+static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode);
static void ZSTD_overrideCParams(
ZSTD_compressionParameters* cParams,
@@ -1330,24 +1571,25 @@ static void ZSTD_overrideCParams(
}
ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
- const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
+ const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode)
{
ZSTD_compressionParameters cParams;
if (srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN && CCtxParams->srcSizeHint > 0) {
- srcSizeHint = CCtxParams->srcSizeHint;
+ assert(CCtxParams->srcSizeHint>=0);
+ srcSizeHint = (U64)CCtxParams->srcSizeHint;
}
cParams = ZSTD_getCParams_internal(CCtxParams->compressionLevel, srcSizeHint, dictSize, mode);
if (CCtxParams->ldmParams.enableLdm == ZSTD_ps_enable) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG;
ZSTD_overrideCParams(&cParams, &CCtxParams->cParams);
assert(!ZSTD_checkCParams(cParams));
/* srcSizeHint == 0 means 0 */
- return ZSTD_adjustCParams_internal(cParams, srcSizeHint, dictSize, mode);
+ return ZSTD_adjustCParams_internal(cParams, srcSizeHint, dictSize, mode, CCtxParams->useRowMatchFinder);
}
static size_t
ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,
- const ZSTD_paramSwitch_e useRowMatchFinder,
- const U32 enableDedicatedDictSearch,
+ const ZSTD_ParamSwitch_e useRowMatchFinder,
+ const int enableDedicatedDictSearch,
const U32 forCCtx)
{
/* chain table size should be 0 for fast or row-hash strategies */
@@ -1363,14 +1605,14 @@ ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,
+ hSize * sizeof(U32)
+ h3Size * sizeof(U32);
size_t const optPotentialSpace =
- ZSTD_cwksp_aligned_alloc_size((MaxML+1) * sizeof(U32))
- + ZSTD_cwksp_aligned_alloc_size((MaxLL+1) * sizeof(U32))
- + ZSTD_cwksp_aligned_alloc_size((MaxOff+1) * sizeof(U32))
- + ZSTD_cwksp_aligned_alloc_size((1<<Litbits) * sizeof(U32))
- + ZSTD_cwksp_aligned_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t))
- + ZSTD_cwksp_aligned_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
+ ZSTD_cwksp_aligned64_alloc_size((MaxML+1) * sizeof(U32))
+ + ZSTD_cwksp_aligned64_alloc_size((MaxLL+1) * sizeof(U32))
+ + ZSTD_cwksp_aligned64_alloc_size((MaxOff+1) * sizeof(U32))
+ + ZSTD_cwksp_aligned64_alloc_size((1<<Litbits) * sizeof(U32))
+ + ZSTD_cwksp_aligned64_alloc_size(ZSTD_OPT_SIZE * sizeof(ZSTD_match_t))
+ + ZSTD_cwksp_aligned64_alloc_size(ZSTD_OPT_SIZE * sizeof(ZSTD_optimal_t));
size_t const lazyAdditionalSpace = ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder)
- ? ZSTD_cwksp_aligned_alloc_size(hSize*sizeof(U16))
+ ? ZSTD_cwksp_aligned64_alloc_size(hSize)
: 0;
size_t const optSpace = (forCCtx && (cParams->strategy >= ZSTD_btopt))
? optPotentialSpace
@@ -1386,30 +1628,38 @@ ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,
return tableSpace + optSpace + slackSpace + lazyAdditionalSpace;
}
+/* Helper function for calculating memory requirements.
+ * Gives a tighter bound than ZSTD_sequenceBound() by taking minMatch into account. */
+static size_t ZSTD_maxNbSeq(size_t blockSize, unsigned minMatch, int useSequenceProducer) {
+ U32 const divider = (minMatch==3 || useSequenceProducer) ? 3 : 4;
+ return blockSize / divider;
+}
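/* Quick numeric check of the divider above (standalone sketch, not zstd code):
 * each sequence consumes at least minMatch bytes of input, and an external
 * sequence producer may emit 3-byte matches regardless of minMatch, hence the
 * divider of 3 in that case. For a 128 KB block (ZSTD_BLOCKSIZE_MAX): */
#include <assert.h>
#include <stddef.h>

static void maxNbSeq_numbers(void)
{
    size_t const blockSize = 128 * 1024;
    assert(blockSize / 4 == 32768);  /* minMatch >= 4, internal matchfinder */
    assert(blockSize / 3 == 43690);  /* minMatch == 3, or external producer in use */
}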
+
static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal(
const ZSTD_compressionParameters* cParams,
const ldmParams_t* ldmParams,
const int isStatic,
- const ZSTD_paramSwitch_e useRowMatchFinder,
+ const ZSTD_ParamSwitch_e useRowMatchFinder,
const size_t buffInSize,
const size_t buffOutSize,
- const U64 pledgedSrcSize)
+ const U64 pledgedSrcSize,
+ int useSequenceProducer,
+ size_t maxBlockSize)
{
size_t const windowSize = (size_t) BOUNDED(1ULL, 1ULL << cParams->windowLog, pledgedSrcSize);
- size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
- U32 const divider = (cParams->minMatch==3) ? 3 : 4;
- size_t const maxNbSeq = blockSize / divider;
+ size_t const blockSize = MIN(ZSTD_resolveMaxBlockSize(maxBlockSize), windowSize);
+ size_t const maxNbSeq = ZSTD_maxNbSeq(blockSize, cParams->minMatch, useSequenceProducer);
size_t const tokenSpace = ZSTD_cwksp_alloc_size(WILDCOPY_OVERLENGTH + blockSize)
- + ZSTD_cwksp_aligned_alloc_size(maxNbSeq * sizeof(seqDef))
+ + ZSTD_cwksp_aligned64_alloc_size(maxNbSeq * sizeof(SeqDef))
+ 3 * ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(BYTE));
- size_t const entropySpace = ZSTD_cwksp_alloc_size(ENTROPY_WORKSPACE_SIZE);
+ size_t const tmpWorkSpace = ZSTD_cwksp_alloc_size(TMP_WORKSPACE_SIZE);
size_t const blockStateSpace = 2 * ZSTD_cwksp_alloc_size(sizeof(ZSTD_compressedBlockState_t));
size_t const matchStateSize = ZSTD_sizeof_matchState(cParams, useRowMatchFinder, /* enableDedicatedDictSearch */ 0, /* forCCtx */ 1);
size_t const ldmSpace = ZSTD_ldm_getTableSize(*ldmParams);
size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(*ldmParams, blockSize);
size_t const ldmSeqSpace = ldmParams->enableLdm == ZSTD_ps_enable ?
- ZSTD_cwksp_aligned_alloc_size(maxNbLdmSeq * sizeof(rawSeq)) : 0;
+ ZSTD_cwksp_aligned64_alloc_size(maxNbLdmSeq * sizeof(rawSeq)) : 0;
size_t const bufferSpace = ZSTD_cwksp_alloc_size(buffInSize)
@@ -1417,15 +1667,21 @@ static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal(
size_t const cctxSpace = isStatic ? ZSTD_cwksp_alloc_size(sizeof(ZSTD_CCtx)) : 0;
+ size_t const maxNbExternalSeq = ZSTD_sequenceBound(blockSize);
+ size_t const externalSeqSpace = useSequenceProducer
+ ? ZSTD_cwksp_aligned64_alloc_size(maxNbExternalSeq * sizeof(ZSTD_Sequence))
+ : 0;
+
size_t const neededSpace =
cctxSpace +
- entropySpace +
+ tmpWorkSpace +
blockStateSpace +
ldmSpace +
ldmSeqSpace +
matchStateSize +
tokenSpace +
- bufferSpace;
+ bufferSpace +
+ externalSeqSpace;
DEBUGLOG(5, "estimate workspace : %u", (U32)neededSpace);
return neededSpace;
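/* How this estimate is typically consumed from the caller side: a hedged
 * sketch assuming upstream libzstd's static-linking-only API
 * (ZSTD_estimateCCtxSize / ZSTD_initStaticCCtx); error handling trimmed.
 * The workspace is sized once up front, then a static CCtx is placed in it
 * so no further allocation happens during compression. */
#define ZSTD_STATIC_LINKING_ONLY
#include <stdlib.h>
#include <zstd.h>

static ZSTD_CCtx* makeStaticCCtx(int level, void** wkspOut)
{
    size_t const wkspSize = ZSTD_estimateCCtxSize(level);
    void* const wksp = malloc(wkspSize);
    if (wksp == NULL) return NULL;
    *wkspOut = wksp;                             /* caller frees after the CCtx is no longer used */
    return ZSTD_initStaticCCtx(wksp, wkspSize);  /* NULL if workspace too small or misaligned */
}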
@@ -1435,7 +1691,7 @@ size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params)
{
ZSTD_compressionParameters const cParams =
ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
- ZSTD_paramSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder,
+ ZSTD_ParamSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder,
&cParams);
RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
@@ -1443,7 +1699,7 @@ size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params)
* be needed. However, we still allocate two 0-sized buffers, which can
* take space under ASAN. */
return ZSTD_estimateCCtxSize_usingCCtxParams_internal(
- &cParams, &params->ldmParams, 1, useRowMatchFinder, 0, 0, ZSTD_CONTENTSIZE_UNKNOWN);
+ &cParams, &params->ldmParams, 1, useRowMatchFinder, 0, 0, ZSTD_CONTENTSIZE_UNKNOWN, ZSTD_hasExtSeqProd(params), params->maxBlockSize);
}
size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams)
@@ -1493,18 +1749,18 @@ size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params)
RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
{ ZSTD_compressionParameters const cParams =
ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
- size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog);
+ size_t const blockSize = MIN(ZSTD_resolveMaxBlockSize(params->maxBlockSize), (size_t)1 << cParams.windowLog);
size_t const inBuffSize = (params->inBufferMode == ZSTD_bm_buffered)
? ((size_t)1 << cParams.windowLog) + blockSize
: 0;
size_t const outBuffSize = (params->outBufferMode == ZSTD_bm_buffered)
? ZSTD_compressBound(blockSize) + 1
: 0;
- ZSTD_paramSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder, &params->cParams);
+ ZSTD_ParamSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder, &params->cParams);
return ZSTD_estimateCCtxSize_usingCCtxParams_internal(
&cParams, &params->ldmParams, 1, useRowMatchFinder, inBuffSize, outBuffSize,
- ZSTD_CONTENTSIZE_UNKNOWN);
+ ZSTD_CONTENTSIZE_UNKNOWN, ZSTD_hasExtSeqProd(params), params->maxBlockSize);
}
}
@@ -1600,7 +1856,7 @@ void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs)
* Invalidate all the matches in the match finder tables.
* Requires nextSrc and base to be set (can be NULL).
*/
-static void ZSTD_invalidateMatchState(ZSTD_matchState_t* ms)
+static void ZSTD_invalidateMatchState(ZSTD_MatchState_t* ms)
{
ZSTD_window_clear(&ms->window);
@@ -1637,12 +1893,25 @@ typedef enum {
ZSTD_resetTarget_CCtx
} ZSTD_resetTarget_e;
+/* Mixes bits in a 64-bit value, based on XXH3_rrmxmx */
+static U64 ZSTD_bitmix(U64 val, U64 len) {
+ val ^= ZSTD_rotateRight_U64(val, 49) ^ ZSTD_rotateRight_U64(val, 24);
+ val *= 0x9FB21C651E98DF25ULL;
+ val ^= (val >> 35) + len ;
+ val *= 0x9FB21C651E98DF25ULL;
+ return val ^ (val >> 28);
+}
+
+/* Mixes in the hashSalt and hashSaltEntropy to create a new hashSalt */
+static void ZSTD_advanceHashSalt(ZSTD_MatchState_t* ms) {
+ ms->hashSalt = ZSTD_bitmix(ms->hashSalt, 8) ^ ZSTD_bitmix((U64) ms->hashSaltEntropy, 4);
+}
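/* Standalone sketch of the salt update above (illustrative only; rotr64 is a
 * local stand-in for ZSTD_rotateRight_U64). Each CCtx reset derives a fresh
 * row-hash salt by mixing the previous salt with workspace-derived entropy. */
#include <stdint.h>

static uint64_t rotr64(uint64_t v, unsigned r) { return (v >> r) | (v << (64 - r)); }

static uint64_t bitmix_sketch(uint64_t val, uint64_t len)
{
    val ^= rotr64(val, 49) ^ rotr64(val, 24);
    val *= 0x9FB21C651E98DF25ULL;
    val ^= (val >> 35) + len;
    val *= 0x9FB21C651E98DF25ULL;
    return val ^ (val >> 28);
}

static uint64_t advanceSalt_sketch(uint64_t salt, uint32_t entropy)
{
    return bitmix_sketch(salt, 8) ^ bitmix_sketch((uint64_t)entropy, 4);
}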
static size_t
-ZSTD_reset_matchState(ZSTD_matchState_t* ms,
+ZSTD_reset_matchState(ZSTD_MatchState_t* ms,
ZSTD_cwksp* ws,
const ZSTD_compressionParameters* cParams,
- const ZSTD_paramSwitch_e useRowMatchFinder,
+ const ZSTD_ParamSwitch_e useRowMatchFinder,
const ZSTD_compResetPolicy_e crp,
const ZSTD_indexResetPolicy_e forceResetIndex,
const ZSTD_resetTarget_e forWho)
@@ -1664,6 +1933,7 @@ ZSTD_reset_matchState(ZSTD_matchState_t* ms,
}
ms->hashLog3 = hashLog3;
+ ms->lazySkipping = 0;
ZSTD_invalidateMatchState(ms);
@@ -1685,22 +1955,19 @@ ZSTD_reset_matchState(ZSTD_matchState_t* ms,
ZSTD_cwksp_clean_tables(ws);
}
- /* opt parser space */
- if ((forWho == ZSTD_resetTarget_CCtx) && (cParams->strategy >= ZSTD_btopt)) {
- DEBUGLOG(4, "reserving optimal parser space");
- ms->opt.litFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (1<<Litbits) * sizeof(unsigned));
- ms->opt.litLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxLL+1) * sizeof(unsigned));
- ms->opt.matchLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxML+1) * sizeof(unsigned));
- ms->opt.offCodeFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxOff+1) * sizeof(unsigned));
- ms->opt.matchTable = (ZSTD_match_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t));
- ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
- }
-
if (ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder)) {
- { /* Row match finder needs an additional table of hashes ("tags") */
- size_t const tagTableSize = hSize*sizeof(U16);
- ms->tagTable = (U16*)ZSTD_cwksp_reserve_aligned(ws, tagTableSize);
- if (ms->tagTable) ZSTD_memset(ms->tagTable, 0, tagTableSize);
+ /* Row match finder needs an additional table of hashes ("tags") */
+ size_t const tagTableSize = hSize;
+ /* We want to generate a new salt whenever we reset a CCtx, but we always want to use
+ * 0 when we reset a CDict */
+ if(forWho == ZSTD_resetTarget_CCtx) {
+ ms->tagTable = (BYTE*) ZSTD_cwksp_reserve_aligned_init_once(ws, tagTableSize);
+ ZSTD_advanceHashSalt(ms);
+ } else {
+ /* When we are not salting we want to always memset the memory */
+ ms->tagTable = (BYTE*) ZSTD_cwksp_reserve_aligned64(ws, tagTableSize);
+ ZSTD_memset(ms->tagTable, 0, tagTableSize);
+ ms->hashSalt = 0;
}
{ /* Switch to 32-entry rows if searchLog is 5 (or more) */
U32 const rowLog = BOUNDED(4, cParams->searchLog, 6);
@@ -1709,6 +1976,17 @@ ZSTD_reset_matchState(ZSTD_matchState_t* ms,
}
}
+ /* opt parser space */
+ if ((forWho == ZSTD_resetTarget_CCtx) && (cParams->strategy >= ZSTD_btopt)) {
+ DEBUGLOG(4, "reserving optimal parser space");
+ ms->opt.litFreq = (unsigned*)ZSTD_cwksp_reserve_aligned64(ws, (1<<Litbits) * sizeof(unsigned));
+ ms->opt.litLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned64(ws, (MaxLL+1) * sizeof(unsigned));
+ ms->opt.matchLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned64(ws, (MaxML+1) * sizeof(unsigned));
+ ms->opt.offCodeFreq = (unsigned*)ZSTD_cwksp_reserve_aligned64(ws, (MaxOff+1) * sizeof(unsigned));
+ ms->opt.matchTable = (ZSTD_match_t*)ZSTD_cwksp_reserve_aligned64(ws, ZSTD_OPT_SIZE * sizeof(ZSTD_match_t));
+ ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned64(ws, ZSTD_OPT_SIZE * sizeof(ZSTD_optimal_t));
+ }
+
ms->cParams = *cParams;
RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation,
@@ -1754,7 +2032,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
{
ZSTD_cwksp* const ws = &zc->workspace;
DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u, useRowMatchFinder=%d useBlockSplitter=%d",
- (U32)pledgedSrcSize, params->cParams.windowLog, (int)params->useRowMatchFinder, (int)params->useBlockSplitter);
+ (U32)pledgedSrcSize, params->cParams.windowLog, (int)params->useRowMatchFinder, (int)params->postBlockSplitter);
assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
zc->isFirstBlock = 1;
@@ -1766,8 +2044,9 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
params = &zc->appliedParams;
assert(params->useRowMatchFinder != ZSTD_ps_auto);
- assert(params->useBlockSplitter != ZSTD_ps_auto);
+ assert(params->postBlockSplitter != ZSTD_ps_auto);
assert(params->ldmParams.enableLdm != ZSTD_ps_auto);
+ assert(params->maxBlockSize != 0);
if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
/* Adjust long distance matching parameters */
ZSTD_ldm_adjustParameters(&zc->appliedParams.ldmParams, &params->cParams);
@@ -1776,9 +2055,8 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
}
{ size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params->cParams.windowLog), pledgedSrcSize));
- size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
- U32 const divider = (params->cParams.minMatch==3) ? 3 : 4;
- size_t const maxNbSeq = blockSize / divider;
+ size_t const blockSize = MIN(params->maxBlockSize, windowSize);
+ size_t const maxNbSeq = ZSTD_maxNbSeq(blockSize, params->cParams.minMatch, ZSTD_hasExtSeqProd(params));
size_t const buffOutSize = (zbuff == ZSTDb_buffered && params->outBufferMode == ZSTD_bm_buffered)
? ZSTD_compressBound(blockSize) + 1
: 0;
@@ -1795,8 +2073,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
size_t const neededSpace =
ZSTD_estimateCCtxSize_usingCCtxParams_internal(
&params->cParams, &params->ldmParams, zc->staticSize != 0, params->useRowMatchFinder,
- buffInSize, buffOutSize, pledgedSrcSize);
- int resizeWorkspace;
+ buffInSize, buffOutSize, pledgedSrcSize, ZSTD_hasExtSeqProd(params), params->maxBlockSize);
FORWARD_IF_ERROR(neededSpace, "cctx size estimate failed!");
@@ -1805,7 +2082,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
{ /* Check if workspace is large enough, alloc a new one if needed */
int const workspaceTooSmall = ZSTD_cwksp_sizeof(ws) < neededSpace;
int const workspaceWasteful = ZSTD_cwksp_check_wasteful(ws, neededSpace);
- resizeWorkspace = workspaceTooSmall || workspaceWasteful;
+ int resizeWorkspace = workspaceTooSmall || workspaceWasteful;
DEBUGLOG(4, "Need %zu B workspace", neededSpace);
DEBUGLOG(4, "windowSize: %zu - blockSize: %zu", windowSize, blockSize);
@@ -1823,21 +2100,23 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
DEBUGLOG(5, "reserving object space");
/* Statically sized space.
- * entropyWorkspace never moves,
+ * tmpWorkspace never moves,
* though prev/next block swap places */
assert(ZSTD_cwksp_check_available(ws, 2 * sizeof(ZSTD_compressedBlockState_t)));
zc->blockState.prevCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));
RETURN_ERROR_IF(zc->blockState.prevCBlock == NULL, memory_allocation, "couldn't allocate prevCBlock");
zc->blockState.nextCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));
RETURN_ERROR_IF(zc->blockState.nextCBlock == NULL, memory_allocation, "couldn't allocate nextCBlock");
- zc->entropyWorkspace = (U32*) ZSTD_cwksp_reserve_object(ws, ENTROPY_WORKSPACE_SIZE);
- RETURN_ERROR_IF(zc->entropyWorkspace == NULL, memory_allocation, "couldn't allocate entropyWorkspace");
+ zc->tmpWorkspace = ZSTD_cwksp_reserve_object(ws, TMP_WORKSPACE_SIZE);
+ RETURN_ERROR_IF(zc->tmpWorkspace == NULL, memory_allocation, "couldn't allocate tmpWorkspace");
+ zc->tmpWkspSize = TMP_WORKSPACE_SIZE;
} }
ZSTD_cwksp_clear(ws);
/* init params */
zc->blockState.matchState.cParams = params->cParams;
+ zc->blockState.matchState.prefetchCDictTables = params->prefetchCDictTables == ZSTD_ps_enable;
zc->pledgedSrcSizePlusOne = pledgedSrcSize+1;
zc->consumedSrcSize = 0;
zc->producedCSize = 0;
@@ -1845,7 +2124,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
zc->appliedParams.fParams.contentSizeFlag = 0;
DEBUGLOG(4, "pledged content size : %u ; flag : %u",
(unsigned)pledgedSrcSize, zc->appliedParams.fParams.contentSizeFlag);
- zc->blockSize = blockSize;
+ zc->blockSizeMax = blockSize;
xxh64_reset(&zc->xxhState, 0);
zc->stage = ZSTDcs_init;
@@ -1854,13 +2133,46 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock);
+ FORWARD_IF_ERROR(ZSTD_reset_matchState(
+ &zc->blockState.matchState,
+ ws,
+ &params->cParams,
+ params->useRowMatchFinder,
+ crp,
+ needsIndexReset,
+ ZSTD_resetTarget_CCtx), "");
+
+ zc->seqStore.sequencesStart = (SeqDef*)ZSTD_cwksp_reserve_aligned64(ws, maxNbSeq * sizeof(SeqDef));
+
+ /* ldm hash table */
+ if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
+ /* TODO: avoid memset? */
+ size_t const ldmHSize = ((size_t)1) << params->ldmParams.hashLog;
+ zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned64(ws, ldmHSize * sizeof(ldmEntry_t));
+ ZSTD_memset(zc->ldmState.hashTable, 0, ldmHSize * sizeof(ldmEntry_t));
+ zc->ldmSequences = (rawSeq*)ZSTD_cwksp_reserve_aligned64(ws, maxNbLdmSeq * sizeof(rawSeq));
+ zc->maxNbLdmSequences = maxNbLdmSeq;
+
+ ZSTD_window_init(&zc->ldmState.window);
+ zc->ldmState.loadedDictEnd = 0;
+ }
+
+ /* reserve space for block-level external sequences */
+ if (ZSTD_hasExtSeqProd(params)) {
+ size_t const maxNbExternalSeq = ZSTD_sequenceBound(blockSize);
+ zc->extSeqBufCapacity = maxNbExternalSeq;
+ zc->extSeqBuf =
+ (ZSTD_Sequence*)ZSTD_cwksp_reserve_aligned64(ws, maxNbExternalSeq * sizeof(ZSTD_Sequence));
+ }
+
+ /* buffers */
+
/* ZSTD_wildcopy() is used to copy into the literals buffer,
* so we have to oversize the buffer by WILDCOPY_OVERLENGTH bytes.
*/
zc->seqStore.litStart = ZSTD_cwksp_reserve_buffer(ws, blockSize + WILDCOPY_OVERLENGTH);
zc->seqStore.maxNbLit = blockSize;
- /* buffers */
zc->bufferedPolicy = zbuff;
zc->inBuffSize = buffInSize;
zc->inBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffInSize);
@@ -1883,32 +2195,9 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
zc->seqStore.llCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
zc->seqStore.mlCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
zc->seqStore.ofCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
- zc->seqStore.sequencesStart = (seqDef*)ZSTD_cwksp_reserve_aligned(ws, maxNbSeq * sizeof(seqDef));
-
- FORWARD_IF_ERROR(ZSTD_reset_matchState(
- &zc->blockState.matchState,
- ws,
- &params->cParams,
- params->useRowMatchFinder,
- crp,
- needsIndexReset,
- ZSTD_resetTarget_CCtx), "");
-
- /* ldm hash table */
- if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
- /* TODO: avoid memset? */
- size_t const ldmHSize = ((size_t)1) << params->ldmParams.hashLog;
- zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned(ws, ldmHSize * sizeof(ldmEntry_t));
- ZSTD_memset(zc->ldmState.hashTable, 0, ldmHSize * sizeof(ldmEntry_t));
- zc->ldmSequences = (rawSeq*)ZSTD_cwksp_reserve_aligned(ws, maxNbLdmSeq * sizeof(rawSeq));
- zc->maxNbLdmSequences = maxNbLdmSeq;
-
- ZSTD_window_init(&zc->ldmState.window);
- zc->ldmState.loadedDictEnd = 0;
- }
DEBUGLOG(3, "wksp: finished allocating, %zd bytes remain available", ZSTD_cwksp_available_space(ws));
- assert(ZSTD_cwksp_estimated_space_within_bounds(ws, neededSpace, resizeWorkspace));
+ assert(ZSTD_cwksp_estimated_space_within_bounds(ws, neededSpace));
zc->initialized = 1;
@@ -1980,7 +2269,8 @@ ZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx* cctx,
}
params.cParams = ZSTD_adjustCParams_internal(adjusted_cdict_cParams, pledgedSrcSize,
- cdict->dictContentSize, ZSTD_cpm_attachDict);
+ cdict->dictContentSize, ZSTD_cpm_attachDict,
+ params.useRowMatchFinder);
params.cParams.windowLog = windowLog;
params.useRowMatchFinder = cdict->useRowMatchFinder; /* cdict overrides */
FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, &params, pledgedSrcSize,
@@ -2019,6 +2309,22 @@ ZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx* cctx,
return 0;
}
+static void ZSTD_copyCDictTableIntoCCtx(U32* dst, U32 const* src, size_t tableSize,
+ ZSTD_compressionParameters const* cParams) {
+ if (ZSTD_CDictIndicesAreTagged(cParams)){
+ /* Remove tags from the CDict table if they are present.
+ * See docs on "short cache" in zstd_compress_internal.h for context. */
+ size_t i;
+ for (i = 0; i < tableSize; i++) {
+ U32 const taggedIndex = src[i];
+ U32 const index = taggedIndex >> ZSTD_SHORT_CACHE_TAG_BITS;
+ dst[i] = index;
+ }
+ } else {
+ ZSTD_memcpy(dst, src, tableSize * sizeof(U32));
+ }
+}
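/* Sketch of the "short cache" tagging being undone above (local helpers, not
 * the zstd internals; the 8-bit tag width is an assumption standing in for
 * ZSTD_SHORT_CACHE_TAG_BITS): a tagged CDict table entry packs the match
 * index in the high bits and a small hash tag in the low bits, so the CCtx
 * copy shifts the tag back out. The packing assumes the index fits in the
 * remaining 24 bits. */
#include <stdint.h>

#define TAG_BITS 8u   /* assumption: ZSTD_SHORT_CACHE_TAG_BITS */

static uint32_t packTaggedIndex(uint32_t index, uint32_t tag)
{
    return (index << TAG_BITS) | (tag & ((1u << TAG_BITS) - 1));
}

static uint32_t untagIndex(uint32_t taggedIndex)
{
    return taggedIndex >> TAG_BITS;   /* recovers the plain index */
}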
+
static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx,
const ZSTD_CDict* cdict,
ZSTD_CCtx_params params,
@@ -2054,26 +2360,29 @@ static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx,
: 0;
size_t const hSize = (size_t)1 << cdict_cParams->hashLog;
- ZSTD_memcpy(cctx->blockState.matchState.hashTable,
- cdict->matchState.hashTable,
- hSize * sizeof(U32));
+ ZSTD_copyCDictTableIntoCCtx(cctx->blockState.matchState.hashTable,
+ cdict->matchState.hashTable,
+ hSize, cdict_cParams);
+
/* Do not copy cdict's chainTable if cctx has parameters such that it would not use chainTable */
if (ZSTD_allocateChainTable(cctx->appliedParams.cParams.strategy, cctx->appliedParams.useRowMatchFinder, 0 /* forDDSDict */)) {
- ZSTD_memcpy(cctx->blockState.matchState.chainTable,
- cdict->matchState.chainTable,
- chainSize * sizeof(U32));
+ ZSTD_copyCDictTableIntoCCtx(cctx->blockState.matchState.chainTable,
+ cdict->matchState.chainTable,
+ chainSize, cdict_cParams);
}
/* copy tag table */
if (ZSTD_rowMatchFinderUsed(cdict_cParams->strategy, cdict->useRowMatchFinder)) {
- size_t const tagTableSize = hSize*sizeof(U16);
+ size_t const tagTableSize = hSize;
ZSTD_memcpy(cctx->blockState.matchState.tagTable,
- cdict->matchState.tagTable,
- tagTableSize);
+ cdict->matchState.tagTable,
+ tagTableSize);
+ cctx->blockState.matchState.hashSalt = cdict->matchState.hashSalt;
}
}
/* Zero the hashTable3, since the cdict never fills it */
- { int const h3log = cctx->blockState.matchState.hashLog3;
+ assert(cctx->blockState.matchState.hashLog3 <= 31);
+ { U32 const h3log = cctx->blockState.matchState.hashLog3;
size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;
assert(cdict->matchState.hashLog3 == 0);
ZSTD_memset(cctx->blockState.matchState.hashTable3, 0, h3Size * sizeof(U32));
@@ -2082,8 +2391,8 @@ static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx,
ZSTD_cwksp_mark_tables_clean(&cctx->workspace);
/* copy dictionary offsets */
- { ZSTD_matchState_t const* srcMatchState = &cdict->matchState;
- ZSTD_matchState_t* dstMatchState = &cctx->blockState.matchState;
+ { ZSTD_MatchState_t const* srcMatchState = &cdict->matchState;
+ ZSTD_MatchState_t* dstMatchState = &cctx->blockState.matchState;
dstMatchState->window = srcMatchState->window;
dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
@@ -2141,12 +2450,13 @@ static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
/* Copy only compression parameters related to tables. */
params.cParams = srcCCtx->appliedParams.cParams;
assert(srcCCtx->appliedParams.useRowMatchFinder != ZSTD_ps_auto);
- assert(srcCCtx->appliedParams.useBlockSplitter != ZSTD_ps_auto);
+ assert(srcCCtx->appliedParams.postBlockSplitter != ZSTD_ps_auto);
assert(srcCCtx->appliedParams.ldmParams.enableLdm != ZSTD_ps_auto);
params.useRowMatchFinder = srcCCtx->appliedParams.useRowMatchFinder;
- params.useBlockSplitter = srcCCtx->appliedParams.useBlockSplitter;
+ params.postBlockSplitter = srcCCtx->appliedParams.postBlockSplitter;
params.ldmParams = srcCCtx->appliedParams.ldmParams;
params.fParams = fParams;
+ params.maxBlockSize = srcCCtx->appliedParams.maxBlockSize;
ZSTD_resetCCtx_internal(dstCCtx, &params, pledgedSrcSize,
/* loadedDictSize */ 0,
ZSTDcrp_leaveDirty, zbuff);
@@ -2166,7 +2476,7 @@ static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
? ((size_t)1 << srcCCtx->appliedParams.cParams.chainLog)
: 0;
size_t const hSize = (size_t)1 << srcCCtx->appliedParams.cParams.hashLog;
- int const h3log = srcCCtx->blockState.matchState.hashLog3;
+ U32 const h3log = srcCCtx->blockState.matchState.hashLog3;
size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;
ZSTD_memcpy(dstCCtx->blockState.matchState.hashTable,
@@ -2184,8 +2494,8 @@ static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
/* copy dictionary offsets */
{
- const ZSTD_matchState_t* srcMatchState = &srcCCtx->blockState.matchState;
- ZSTD_matchState_t* dstMatchState = &dstCCtx->blockState.matchState;
+ const ZSTD_MatchState_t* srcMatchState = &srcCCtx->blockState.matchState;
+ ZSTD_MatchState_t* dstMatchState = &dstCCtx->blockState.matchState;
dstMatchState->window = srcMatchState->window;
dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
@@ -2234,7 +2544,7 @@ ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerVa
/* Protect special index values < ZSTD_WINDOW_START_INDEX. */
U32 const reducerThreshold = reducerValue + ZSTD_WINDOW_START_INDEX;
assert((size & (ZSTD_ROWSIZE-1)) == 0); /* multiple of ZSTD_ROWSIZE */
- assert(size < (1U<<31)); /* can be casted to int */
+ assert(size < (1U<<31)); /* can be cast to int */
for (rowNb=0 ; rowNb < nbRows ; rowNb++) {
@@ -2267,7 +2577,7 @@ static void ZSTD_reduceTable_btlazy2(U32* const table, U32 const size, U32 const
/*! ZSTD_reduceIndex() :
* rescale all indexes to avoid future overflow (indexes are U32) */
-static void ZSTD_reduceIndex (ZSTD_matchState_t* ms, ZSTD_CCtx_params const* params, const U32 reducerValue)
+static void ZSTD_reduceIndex (ZSTD_MatchState_t* ms, ZSTD_CCtx_params const* params, const U32 reducerValue)
{
{ U32 const hSize = (U32)1 << params->cParams.hashLog;
ZSTD_reduceTable(ms->hashTable, hSize, reducerValue);
@@ -2294,26 +2604,32 @@ static void ZSTD_reduceIndex (ZSTD_matchState_t* ms, ZSTD_CCtx_params const* par
/* See doc/zstd_compression_format.md for detailed format description */
-void ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
+int ZSTD_seqToCodes(const SeqStore_t* seqStorePtr)
{
- const seqDef* const sequences = seqStorePtr->sequencesStart;
+ const SeqDef* const sequences = seqStorePtr->sequencesStart;
BYTE* const llCodeTable = seqStorePtr->llCode;
BYTE* const ofCodeTable = seqStorePtr->ofCode;
BYTE* const mlCodeTable = seqStorePtr->mlCode;
U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
U32 u;
+ int longOffsets = 0;
assert(nbSeq <= seqStorePtr->maxNbSeq);
for (u=0; u<nbSeq; u++) {
U32 const llv = sequences[u].litLength;
+ U32 const ofCode = ZSTD_highbit32(sequences[u].offBase);
U32 const mlv = sequences[u].mlBase;
llCodeTable[u] = (BYTE)ZSTD_LLcode(llv);
- ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offBase);
+ ofCodeTable[u] = (BYTE)ofCode;
mlCodeTable[u] = (BYTE)ZSTD_MLcode(mlv);
+ assert(!(MEM_64bits() && ofCode >= STREAM_ACCUMULATOR_MIN));
+ if (MEM_32bits() && ofCode >= STREAM_ACCUMULATOR_MIN)
+ longOffsets = 1;
}
if (seqStorePtr->longLengthType==ZSTD_llt_literalLength)
llCodeTable[seqStorePtr->longLengthPos] = MaxLL;
if (seqStorePtr->longLengthType==ZSTD_llt_matchLength)
mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
+ return longOffsets;
}
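/* Sketch of the longOffsets decision now returned by ZSTD_seqToCodes()
 * (standalone helpers; the threshold 25 is an assumption standing in for
 * STREAM_ACCUMULATOR_MIN on 32-bit builds). The offset code is the position
 * of the highest set bit of offBase; if it needs more bits than the bitstream
 * accumulator can flush at once, the encoder must take the long-offsets path. */
#include <stdint.h>

static unsigned highbit32_sketch(uint32_t v)   /* precondition: v != 0 */
{
    unsigned n = 0;
    while (v >>= 1) n++;
    return n;                                  /* index of the highest set bit */
}

static int needsLongOffsets_on32bit(uint32_t offBase)
{
    unsigned const accumulatorMin32 = 25;      /* assumption: STREAM_ACCUMULATOR_MIN (32-bit) */
    return highbit32_sketch(offBase) >= accumulatorMin32;
}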
/* ZSTD_useTargetCBlockSize():
@@ -2333,9 +2649,9 @@ static int ZSTD_useTargetCBlockSize(const ZSTD_CCtx_params* cctxParams)
* Returns 1 if true, 0 otherwise. */
static int ZSTD_blockSplitterEnabled(ZSTD_CCtx_params* cctxParams)
{
- DEBUGLOG(5, "ZSTD_blockSplitterEnabled (useBlockSplitter=%d)", cctxParams->useBlockSplitter);
- assert(cctxParams->useBlockSplitter != ZSTD_ps_auto);
- return (cctxParams->useBlockSplitter == ZSTD_ps_enable);
+ DEBUGLOG(5, "ZSTD_blockSplitterEnabled (postBlockSplitter=%d)", cctxParams->postBlockSplitter);
+ assert(cctxParams->postBlockSplitter != ZSTD_ps_auto);
+ return (cctxParams->postBlockSplitter == ZSTD_ps_enable);
}
/* Type returned by ZSTD_buildSequencesStatistics containing finalized symbol encoding types
@@ -2347,6 +2663,7 @@ typedef struct {
U32 MLtype;
size_t size;
size_t lastCountSize; /* Accounts for bug in 1.3.4. More detail in ZSTD_entropyCompressSeqStore_internal() */
+ int longOffsets;
} ZSTD_symbolEncodingTypeStats_t;
/* ZSTD_buildSequencesStatistics():
@@ -2357,11 +2674,13 @@ typedef struct {
* entropyWkspSize must be of size at least ENTROPY_WORKSPACE_SIZE - (MaxSeq + 1)*sizeof(U32)
*/
static ZSTD_symbolEncodingTypeStats_t
-ZSTD_buildSequencesStatistics(seqStore_t* seqStorePtr, size_t nbSeq,
- const ZSTD_fseCTables_t* prevEntropy, ZSTD_fseCTables_t* nextEntropy,
- BYTE* dst, const BYTE* const dstEnd,
- ZSTD_strategy strategy, unsigned* countWorkspace,
- void* entropyWorkspace, size_t entropyWkspSize) {
+ZSTD_buildSequencesStatistics(
+ const SeqStore_t* seqStorePtr, size_t nbSeq,
+ const ZSTD_fseCTables_t* prevEntropy, ZSTD_fseCTables_t* nextEntropy,
+ BYTE* dst, const BYTE* const dstEnd,
+ ZSTD_strategy strategy, unsigned* countWorkspace,
+ void* entropyWorkspace, size_t entropyWkspSize)
+{
BYTE* const ostart = dst;
const BYTE* const oend = dstEnd;
BYTE* op = ostart;
@@ -2375,7 +2694,7 @@ ZSTD_buildSequencesStatistics(seqStore_t* seqStorePtr, size_t nbSeq,
stats.lastCountSize = 0;
/* convert length/distances into codes */
- ZSTD_seqToCodes(seqStorePtr);
+ stats.longOffsets = ZSTD_seqToCodes(seqStorePtr);
assert(op <= oend);
assert(nbSeq != 0); /* ZSTD_selectEncodingType() divides by nbSeq */
/* build CTable for Literal Lengths */
@@ -2392,7 +2711,7 @@ ZSTD_buildSequencesStatistics(seqStore_t* seqStorePtr, size_t nbSeq,
assert(!(stats.LLtype < set_compressed && nextEntropy->litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
{ size_t const countSize = ZSTD_buildCTable(
op, (size_t)(oend - op),
- CTable_LitLength, LLFSELog, (symbolEncodingType_e)stats.LLtype,
+ CTable_LitLength, LLFSELog, (SymbolEncodingType_e)stats.LLtype,
countWorkspace, max, llCodeTable, nbSeq,
LL_defaultNorm, LL_defaultNormLog, MaxLL,
prevEntropy->litlengthCTable,
@@ -2413,7 +2732,7 @@ ZSTD_buildSequencesStatistics(seqStore_t* seqStorePtr, size_t nbSeq,
size_t const mostFrequent = HIST_countFast_wksp(
countWorkspace, &max, ofCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */
/* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
- ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
+ ZSTD_DefaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
DEBUGLOG(5, "Building OF table");
nextEntropy->offcode_repeatMode = prevEntropy->offcode_repeatMode;
stats.Offtype = ZSTD_selectEncodingType(&nextEntropy->offcode_repeatMode,
@@ -2424,7 +2743,7 @@ ZSTD_buildSequencesStatistics(seqStore_t* seqStorePtr, size_t nbSeq,
assert(!(stats.Offtype < set_compressed && nextEntropy->offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */
{ size_t const countSize = ZSTD_buildCTable(
op, (size_t)(oend - op),
- CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)stats.Offtype,
+ CTable_OffsetBits, OffFSELog, (SymbolEncodingType_e)stats.Offtype,
countWorkspace, max, ofCodeTable, nbSeq,
OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
prevEntropy->offcodeCTable,
@@ -2454,7 +2773,7 @@ ZSTD_buildSequencesStatistics(seqStore_t* seqStorePtr, size_t nbSeq,
assert(!(stats.MLtype < set_compressed && nextEntropy->matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
{ size_t const countSize = ZSTD_buildCTable(
op, (size_t)(oend - op),
- CTable_MatchLength, MLFSELog, (symbolEncodingType_e)stats.MLtype,
+ CTable_MatchLength, MLFSELog, (SymbolEncodingType_e)stats.MLtype,
countWorkspace, max, mlCodeTable, nbSeq,
ML_defaultNorm, ML_defaultNormLog, MaxML,
prevEntropy->matchlengthCTable,
@@ -2480,22 +2799,23 @@ ZSTD_buildSequencesStatistics(seqStore_t* seqStorePtr, size_t nbSeq,
*/
#define SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO 20
MEM_STATIC size_t
-ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr,
- const ZSTD_entropyCTables_t* prevEntropy,
- ZSTD_entropyCTables_t* nextEntropy,
- const ZSTD_CCtx_params* cctxParams,
- void* dst, size_t dstCapacity,
- void* entropyWorkspace, size_t entropyWkspSize,
- const int bmi2)
+ZSTD_entropyCompressSeqStore_internal(
+ void* dst, size_t dstCapacity,
+ const void* literals, size_t litSize,
+ const SeqStore_t* seqStorePtr,
+ const ZSTD_entropyCTables_t* prevEntropy,
+ ZSTD_entropyCTables_t* nextEntropy,
+ const ZSTD_CCtx_params* cctxParams,
+ void* entropyWorkspace, size_t entropyWkspSize,
+ const int bmi2)
{
- const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
ZSTD_strategy const strategy = cctxParams->cParams.strategy;
unsigned* count = (unsigned*)entropyWorkspace;
FSE_CTable* CTable_LitLength = nextEntropy->fse.litlengthCTable;
FSE_CTable* CTable_OffsetBits = nextEntropy->fse.offcodeCTable;
FSE_CTable* CTable_MatchLength = nextEntropy->fse.matchlengthCTable;
- const seqDef* const sequences = seqStorePtr->sequencesStart;
- const size_t nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
+ const SeqDef* const sequences = seqStorePtr->sequencesStart;
+ const size_t nbSeq = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
const BYTE* const ofCodeTable = seqStorePtr->ofCode;
const BYTE* const llCodeTable = seqStorePtr->llCode;
const BYTE* const mlCodeTable = seqStorePtr->mlCode;
@@ -2503,29 +2823,28 @@ ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr,
BYTE* const oend = ostart + dstCapacity;
BYTE* op = ostart;
size_t lastCountSize;
+ int longOffsets = 0;
entropyWorkspace = count + (MaxSeq + 1);
entropyWkspSize -= (MaxSeq + 1) * sizeof(*count);
- DEBUGLOG(4, "ZSTD_entropyCompressSeqStore_internal (nbSeq=%zu)", nbSeq);
+ DEBUGLOG(5, "ZSTD_entropyCompressSeqStore_internal (nbSeq=%zu, dstCapacity=%zu)", nbSeq, dstCapacity);
ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
assert(entropyWkspSize >= HUF_WORKSPACE_SIZE);
/* Compress literals */
- { const BYTE* const literals = seqStorePtr->litStart;
- size_t const numSequences = seqStorePtr->sequences - seqStorePtr->sequencesStart;
- size_t const numLiterals = seqStorePtr->lit - seqStorePtr->litStart;
+ { size_t const numSequences = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
/* Base suspicion of uncompressibility on ratio of literals to sequences */
- unsigned const suspectUncompressible = (numSequences == 0) || (numLiterals / numSequences >= SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO);
- size_t const litSize = (size_t)(seqStorePtr->lit - literals);
+ int const suspectUncompressible = (numSequences == 0) || (litSize / numSequences >= SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO);
+
size_t const cSize = ZSTD_compressLiterals(
- &prevEntropy->huf, &nextEntropy->huf,
- cctxParams->cParams.strategy,
- ZSTD_literalsCompressionIsDisabled(cctxParams),
op, dstCapacity,
literals, litSize,
entropyWorkspace, entropyWkspSize,
- bmi2, suspectUncompressible);
+ &prevEntropy->huf, &nextEntropy->huf,
+ cctxParams->cParams.strategy,
+ ZSTD_literalsCompressionIsDisabled(cctxParams),
+ suspectUncompressible, bmi2);
FORWARD_IF_ERROR(cSize, "ZSTD_compressLiterals failed");
assert(cSize <= dstCapacity);
op += cSize;
@@ -2551,11 +2870,10 @@ ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr,
ZSTD_memcpy(&nextEntropy->fse, &prevEntropy->fse, sizeof(prevEntropy->fse));
return (size_t)(op - ostart);
}
- {
- ZSTD_symbolEncodingTypeStats_t stats;
- BYTE* seqHead = op++;
+ { BYTE* const seqHead = op++;
/* build stats for sequences */
- stats = ZSTD_buildSequencesStatistics(seqStorePtr, nbSeq,
+ const ZSTD_symbolEncodingTypeStats_t stats =
+ ZSTD_buildSequencesStatistics(seqStorePtr, nbSeq,
&prevEntropy->fse, &nextEntropy->fse,
op, oend,
strategy, count,
@@ -2564,6 +2882,7 @@ ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr,
*seqHead = (BYTE)((stats.LLtype<<6) + (stats.Offtype<<4) + (stats.MLtype<<2));
lastCountSize = stats.lastCountSize;
op += stats.size;
+ longOffsets = stats.longOffsets;
}
{ size_t const bitstreamSize = ZSTD_encodeSequences(
@@ -2597,104 +2916,146 @@ ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr,
return (size_t)(op - ostart);
}
-MEM_STATIC size_t
-ZSTD_entropyCompressSeqStore(seqStore_t* seqStorePtr,
- const ZSTD_entropyCTables_t* prevEntropy,
- ZSTD_entropyCTables_t* nextEntropy,
- const ZSTD_CCtx_params* cctxParams,
- void* dst, size_t dstCapacity,
- size_t srcSize,
- void* entropyWorkspace, size_t entropyWkspSize,
- int bmi2)
+static size_t
+ZSTD_entropyCompressSeqStore_wExtLitBuffer(
+ void* dst, size_t dstCapacity,
+ const void* literals, size_t litSize,
+ size_t blockSize,
+ const SeqStore_t* seqStorePtr,
+ const ZSTD_entropyCTables_t* prevEntropy,
+ ZSTD_entropyCTables_t* nextEntropy,
+ const ZSTD_CCtx_params* cctxParams,
+ void* entropyWorkspace, size_t entropyWkspSize,
+ int bmi2)
{
size_t const cSize = ZSTD_entropyCompressSeqStore_internal(
- seqStorePtr, prevEntropy, nextEntropy, cctxParams,
dst, dstCapacity,
+ literals, litSize,
+ seqStorePtr, prevEntropy, nextEntropy, cctxParams,
entropyWorkspace, entropyWkspSize, bmi2);
if (cSize == 0) return 0;
/* When srcSize <= dstCapacity, there is enough space to write a raw uncompressed block.
* Since we ran out of space, the block must not be compressible, so fall back to a raw uncompressed block.
*/
- if ((cSize == ERROR(dstSize_tooSmall)) & (srcSize <= dstCapacity))
+ if ((cSize == ERROR(dstSize_tooSmall)) & (blockSize <= dstCapacity)) {
+ DEBUGLOG(4, "not enough dstCapacity (%zu) for ZSTD_entropyCompressSeqStore_internal()=> do not compress block", dstCapacity);
return 0; /* block not compressed */
+ }
FORWARD_IF_ERROR(cSize, "ZSTD_entropyCompressSeqStore_internal failed");
/* Check compressibility */
- { size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, cctxParams->cParams.strategy);
+ { size_t const maxCSize = blockSize - ZSTD_minGain(blockSize, cctxParams->cParams.strategy);
if (cSize >= maxCSize) return 0; /* block not compressed */
}
- DEBUGLOG(4, "ZSTD_entropyCompressSeqStore() cSize: %zu", cSize);
+ DEBUGLOG(5, "ZSTD_entropyCompressSeqStore() cSize: %zu", cSize);
+ * libzstd decoders before v1.5.4 are not compatible with compressed blocks of size ZSTD_BLOCKSIZE_MAX exactly.
+ * This restriction is indirectly already fulfilled by respecting ZSTD_minGain() condition above.
+ */
+ assert(cSize < ZSTD_BLOCKSIZE_MAX);
return cSize;
}
+static size_t
+ZSTD_entropyCompressSeqStore(
+ const SeqStore_t* seqStorePtr,
+ const ZSTD_entropyCTables_t* prevEntropy,
+ ZSTD_entropyCTables_t* nextEntropy,
+ const ZSTD_CCtx_params* cctxParams,
+ void* dst, size_t dstCapacity,
+ size_t srcSize,
+ void* entropyWorkspace, size_t entropyWkspSize,
+ int bmi2)
+{
+ return ZSTD_entropyCompressSeqStore_wExtLitBuffer(
+ dst, dstCapacity,
+ seqStorePtr->litStart, (size_t)(seqStorePtr->lit - seqStorePtr->litStart),
+ srcSize,
+ seqStorePtr,
+ prevEntropy, nextEntropy,
+ cctxParams,
+ entropyWorkspace, entropyWkspSize,
+ bmi2);
+}
+
/* ZSTD_selectBlockCompressor() :
* Not static, but internal use only (used by long distance matcher)
* assumption : strat is a valid strategy */
-ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramSwitch_e useRowMatchFinder, ZSTD_dictMode_e dictMode)
+ZSTD_BlockCompressor_f ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_ParamSwitch_e useRowMatchFinder, ZSTD_dictMode_e dictMode)
{
- static const ZSTD_blockCompressor blockCompressor[4][ZSTD_STRATEGY_MAX+1] = {
+ static const ZSTD_BlockCompressor_f blockCompressor[4][ZSTD_STRATEGY_MAX+1] = {
{ ZSTD_compressBlock_fast /* default for 0 */,
ZSTD_compressBlock_fast,
- ZSTD_compressBlock_doubleFast,
- ZSTD_compressBlock_greedy,
- ZSTD_compressBlock_lazy,
- ZSTD_compressBlock_lazy2,
- ZSTD_compressBlock_btlazy2,
- ZSTD_compressBlock_btopt,
- ZSTD_compressBlock_btultra,
- ZSTD_compressBlock_btultra2 },
+ ZSTD_COMPRESSBLOCK_DOUBLEFAST,
+ ZSTD_COMPRESSBLOCK_GREEDY,
+ ZSTD_COMPRESSBLOCK_LAZY,
+ ZSTD_COMPRESSBLOCK_LAZY2,
+ ZSTD_COMPRESSBLOCK_BTLAZY2,
+ ZSTD_COMPRESSBLOCK_BTOPT,
+ ZSTD_COMPRESSBLOCK_BTULTRA,
+ ZSTD_COMPRESSBLOCK_BTULTRA2
+ },
{ ZSTD_compressBlock_fast_extDict /* default for 0 */,
ZSTD_compressBlock_fast_extDict,
- ZSTD_compressBlock_doubleFast_extDict,
- ZSTD_compressBlock_greedy_extDict,
- ZSTD_compressBlock_lazy_extDict,
- ZSTD_compressBlock_lazy2_extDict,
- ZSTD_compressBlock_btlazy2_extDict,
- ZSTD_compressBlock_btopt_extDict,
- ZSTD_compressBlock_btultra_extDict,
- ZSTD_compressBlock_btultra_extDict },
+ ZSTD_COMPRESSBLOCK_DOUBLEFAST_EXTDICT,
+ ZSTD_COMPRESSBLOCK_GREEDY_EXTDICT,
+ ZSTD_COMPRESSBLOCK_LAZY_EXTDICT,
+ ZSTD_COMPRESSBLOCK_LAZY2_EXTDICT,
+ ZSTD_COMPRESSBLOCK_BTLAZY2_EXTDICT,
+ ZSTD_COMPRESSBLOCK_BTOPT_EXTDICT,
+ ZSTD_COMPRESSBLOCK_BTULTRA_EXTDICT,
+ ZSTD_COMPRESSBLOCK_BTULTRA_EXTDICT
+ },
{ ZSTD_compressBlock_fast_dictMatchState /* default for 0 */,
ZSTD_compressBlock_fast_dictMatchState,
- ZSTD_compressBlock_doubleFast_dictMatchState,
- ZSTD_compressBlock_greedy_dictMatchState,
- ZSTD_compressBlock_lazy_dictMatchState,
- ZSTD_compressBlock_lazy2_dictMatchState,
- ZSTD_compressBlock_btlazy2_dictMatchState,
- ZSTD_compressBlock_btopt_dictMatchState,
- ZSTD_compressBlock_btultra_dictMatchState,
- ZSTD_compressBlock_btultra_dictMatchState },
+ ZSTD_COMPRESSBLOCK_DOUBLEFAST_DICTMATCHSTATE,
+ ZSTD_COMPRESSBLOCK_GREEDY_DICTMATCHSTATE,
+ ZSTD_COMPRESSBLOCK_LAZY_DICTMATCHSTATE,
+ ZSTD_COMPRESSBLOCK_LAZY2_DICTMATCHSTATE,
+ ZSTD_COMPRESSBLOCK_BTLAZY2_DICTMATCHSTATE,
+ ZSTD_COMPRESSBLOCK_BTOPT_DICTMATCHSTATE,
+ ZSTD_COMPRESSBLOCK_BTULTRA_DICTMATCHSTATE,
+ ZSTD_COMPRESSBLOCK_BTULTRA_DICTMATCHSTATE
+ },
{ NULL /* default for 0 */,
NULL,
NULL,
- ZSTD_compressBlock_greedy_dedicatedDictSearch,
- ZSTD_compressBlock_lazy_dedicatedDictSearch,
- ZSTD_compressBlock_lazy2_dedicatedDictSearch,
+ ZSTD_COMPRESSBLOCK_GREEDY_DEDICATEDDICTSEARCH,
+ ZSTD_COMPRESSBLOCK_LAZY_DEDICATEDDICTSEARCH,
+ ZSTD_COMPRESSBLOCK_LAZY2_DEDICATEDDICTSEARCH,
NULL,
NULL,
NULL,
NULL }
};
- ZSTD_blockCompressor selectedCompressor;
+ ZSTD_BlockCompressor_f selectedCompressor;
ZSTD_STATIC_ASSERT((unsigned)ZSTD_fast == 1);
- assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
- DEBUGLOG(4, "Selected block compressor: dictMode=%d strat=%d rowMatchfinder=%d", (int)dictMode, (int)strat, (int)useRowMatchFinder);
+ assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, (int)strat));
+ DEBUGLOG(5, "Selected block compressor: dictMode=%d strat=%d rowMatchfinder=%d", (int)dictMode, (int)strat, (int)useRowMatchFinder);
if (ZSTD_rowMatchFinderUsed(strat, useRowMatchFinder)) {
- static const ZSTD_blockCompressor rowBasedBlockCompressors[4][3] = {
- { ZSTD_compressBlock_greedy_row,
- ZSTD_compressBlock_lazy_row,
- ZSTD_compressBlock_lazy2_row },
- { ZSTD_compressBlock_greedy_extDict_row,
- ZSTD_compressBlock_lazy_extDict_row,
- ZSTD_compressBlock_lazy2_extDict_row },
- { ZSTD_compressBlock_greedy_dictMatchState_row,
- ZSTD_compressBlock_lazy_dictMatchState_row,
- ZSTD_compressBlock_lazy2_dictMatchState_row },
- { ZSTD_compressBlock_greedy_dedicatedDictSearch_row,
- ZSTD_compressBlock_lazy_dedicatedDictSearch_row,
- ZSTD_compressBlock_lazy2_dedicatedDictSearch_row }
+ static const ZSTD_BlockCompressor_f rowBasedBlockCompressors[4][3] = {
+ {
+ ZSTD_COMPRESSBLOCK_GREEDY_ROW,
+ ZSTD_COMPRESSBLOCK_LAZY_ROW,
+ ZSTD_COMPRESSBLOCK_LAZY2_ROW
+ },
+ {
+ ZSTD_COMPRESSBLOCK_GREEDY_EXTDICT_ROW,
+ ZSTD_COMPRESSBLOCK_LAZY_EXTDICT_ROW,
+ ZSTD_COMPRESSBLOCK_LAZY2_EXTDICT_ROW
+ },
+ {
+ ZSTD_COMPRESSBLOCK_GREEDY_DICTMATCHSTATE_ROW,
+ ZSTD_COMPRESSBLOCK_LAZY_DICTMATCHSTATE_ROW,
+ ZSTD_COMPRESSBLOCK_LAZY2_DICTMATCHSTATE_ROW
+ },
+ {
+ ZSTD_COMPRESSBLOCK_GREEDY_DEDICATEDDICTSEARCH_ROW,
+ ZSTD_COMPRESSBLOCK_LAZY_DEDICATEDDICTSEARCH_ROW,
+ ZSTD_COMPRESSBLOCK_LAZY2_DEDICATEDDICTSEARCH_ROW
+ }
};
- DEBUGLOG(4, "Selecting a row-based matchfinder");
+ DEBUGLOG(5, "Selecting a row-based matchfinder");
assert(useRowMatchFinder != ZSTD_ps_auto);
selectedCompressor = rowBasedBlockCompressors[(int)dictMode][(int)strat - (int)ZSTD_greedy];
} else {
@@ -2704,30 +3065,126 @@ ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramS
return selectedCompressor;
}
-static void ZSTD_storeLastLiterals(seqStore_t* seqStorePtr,
+static void ZSTD_storeLastLiterals(SeqStore_t* seqStorePtr,
const BYTE* anchor, size_t lastLLSize)
{
ZSTD_memcpy(seqStorePtr->lit, anchor, lastLLSize);
seqStorePtr->lit += lastLLSize;
}
-void ZSTD_resetSeqStore(seqStore_t* ssPtr)
+void ZSTD_resetSeqStore(SeqStore_t* ssPtr)
{
ssPtr->lit = ssPtr->litStart;
ssPtr->sequences = ssPtr->sequencesStart;
ssPtr->longLengthType = ZSTD_llt_none;
}
-typedef enum { ZSTDbss_compress, ZSTDbss_noCompress } ZSTD_buildSeqStore_e;
+/* ZSTD_postProcessSequenceProducerResult() :
+ * Validates and post-processes sequences obtained through the external matchfinder API:
+ * - Checks whether nbExternalSeqs represents an error condition.
+ * - Appends a block delimiter to outSeqs if one is not already present.
+ * See zstd.h for context regarding block delimiters.
+ * Returns the number of sequences after post-processing, or an error code. */
+static size_t ZSTD_postProcessSequenceProducerResult(
+ ZSTD_Sequence* outSeqs, size_t nbExternalSeqs, size_t outSeqsCapacity, size_t srcSize
+) {
+ RETURN_ERROR_IF(
+ nbExternalSeqs > outSeqsCapacity,
+ sequenceProducer_failed,
+ "External sequence producer returned error code %lu",
+ (unsigned long)nbExternalSeqs
+ );
+
+ RETURN_ERROR_IF(
+ nbExternalSeqs == 0 && srcSize > 0,
+ sequenceProducer_failed,
+ "Got zero sequences from external sequence producer for a non-empty src buffer!"
+ );
+
+ if (srcSize == 0) {
+ ZSTD_memset(&outSeqs[0], 0, sizeof(ZSTD_Sequence));
+ return 1;
+ }
+
+ {
+ ZSTD_Sequence const lastSeq = outSeqs[nbExternalSeqs - 1];
+
+ /* We can return early if lastSeq is already a block delimiter. */
+ if (lastSeq.offset == 0 && lastSeq.matchLength == 0) {
+ return nbExternalSeqs;
+ }
+
+ /* This error condition is only possible if the external matchfinder
+ * produced an invalid parse, by definition of ZSTD_sequenceBound(). */
+ RETURN_ERROR_IF(
+ nbExternalSeqs == outSeqsCapacity,
+ sequenceProducer_failed,
+ "nbExternalSeqs == outSeqsCapacity but lastSeq is not a block delimiter!"
+ );
+
+ /* lastSeq is not a block delimiter, so we need to append one. */
+ ZSTD_memset(&outSeqs[nbExternalSeqs], 0, sizeof(ZSTD_Sequence));
+ return nbExternalSeqs + 1;
+ }
+}
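/* Minimal sketch of an external sequence producer matching the call shape used
 * by ZSTD_buildSeqStore() below (parameter order mirrors the extSeqProdFunc
 * invocation; ZSTD_Sequence is the public zstd type, available behind
 * ZSTD_STATIC_LINKING_ONLY). This producer "finds" no matches and emits a
 * single block delimiter covering the whole block as literals, which the
 * post-processing above accepts as-is. */
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

static size_t literalsOnlyProducer(
        void* state,
        ZSTD_Sequence* outSeqs, size_t outSeqsCapacity,
        const void* src, size_t srcSize,
        const void* dict, size_t dictSize,
        int compressionLevel,
        size_t windowSize)
{
    (void)state; (void)src; (void)dict; (void)dictSize;
    (void)compressionLevel; (void)windowSize;
    if (outSeqsCapacity < 1)
        return outSeqsCapacity + 1;   /* any value > capacity is treated as an error above */
    outSeqs[0].offset      = 0;       /* offset==0 && matchLength==0 ... */
    outSeqs[0].matchLength = 0;       /* ... marks the block delimiter */
    outSeqs[0].litLength   = (unsigned)srcSize;  /* whole block emitted as literals */
    outSeqs[0].rep         = 0;
    return 1;                         /* number of sequences produced */
}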
+
+/* ZSTD_fastSequenceLengthSum() :
+ * Returns sum(litLen) + sum(matchLen) + lastLits for *seqBuf*.
+ * Similar to another function in zstd_compress.c (determine_blockSize),
+ * except it doesn't check for a block delimiter to end summation.
+ * Removing the early exit allows the compiler to auto-vectorize (https://godbolt.org/z/cY1cajz9P).
+ * This function can be deleted and replaced by determine_blockSize after we resolve issue #3456. */
+static size_t ZSTD_fastSequenceLengthSum(ZSTD_Sequence const* seqBuf, size_t seqBufSize) {
+ size_t matchLenSum, litLenSum, i;
+ matchLenSum = 0;
+ litLenSum = 0;
+ for (i = 0; i < seqBufSize; i++) {
+ litLenSum += seqBuf[i].litLength;
+ matchLenSum += seqBuf[i].matchLength;
+ }
+ return litLenSum + matchLenSum;
+}
+
+/*
+ * Function to validate sequences produced by a block compressor.
+ */
+static void ZSTD_validateSeqStore(const SeqStore_t* seqStore, const ZSTD_compressionParameters* cParams)
+{
+#if DEBUGLEVEL >= 1
+ const SeqDef* seq = seqStore->sequencesStart;
+ const SeqDef* const seqEnd = seqStore->sequences;
+ size_t const matchLenLowerBound = cParams->minMatch == 3 ? 3 : 4;
+ for (; seq < seqEnd; ++seq) {
+ const ZSTD_SequenceLength seqLength = ZSTD_getSequenceLength(seqStore, seq);
+ assert(seqLength.matchLength >= matchLenLowerBound);
+ (void)seqLength;
+ (void)matchLenLowerBound;
+ }
+#else
+ (void)seqStore;
+ (void)cParams;
+#endif
+}
+
+static size_t
+ZSTD_transferSequences_wBlockDelim(ZSTD_CCtx* cctx,
+ ZSTD_SequencePosition* seqPos,
+ const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
+ const void* src, size_t blockSize,
+ ZSTD_ParamSwitch_e externalRepSearch);
+
+typedef enum { ZSTDbss_compress, ZSTDbss_noCompress } ZSTD_BuildSeqStore_e;
static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
{
- ZSTD_matchState_t* const ms = &zc->blockState.matchState;
+ ZSTD_MatchState_t* const ms = &zc->blockState.matchState;
DEBUGLOG(5, "ZSTD_buildSeqStore (srcSize=%zu)", srcSize);
assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
/* Assert that we have correctly flushed the ctx params into the ms's copy */
ZSTD_assertEqualCParams(zc->appliedParams.cParams, ms->cParams);
- if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {
+ /* TODO: See issue 3090. MIN_CBLOCK_SIZE was reduced from 3 to 2, so an additional 1 is
+ * added here to compensate. This logic needs to be revisited and made more consistent. */
+ if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1+1) {
if (zc->appliedParams.cParams.strategy >= ZSTD_btopt) {
ZSTD_ldm_skipRawSeqStoreBytes(&zc->externSeqStore, srcSize);
} else {
@@ -2763,6 +3220,15 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
}
if (zc->externSeqStore.pos < zc->externSeqStore.size) {
assert(zc->appliedParams.ldmParams.enableLdm == ZSTD_ps_disable);
+
+ /* External matchfinder + LDM is technically possible, just not implemented yet.
+ * We need to revisit soon and implement it. */
+ RETURN_ERROR_IF(
+ ZSTD_hasExtSeqProd(&zc->appliedParams),
+ parameter_combination_unsupported,
+ "Long-distance matching with external sequence producer enabled is not currently supported."
+ );
+
/* Updates ldmSeqStore.pos */
lastLLSize =
ZSTD_ldm_blockCompress(&zc->externSeqStore,
@@ -2772,7 +3238,15 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
src, srcSize);
assert(zc->externSeqStore.pos <= zc->externSeqStore.size);
} else if (zc->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable) {
- rawSeqStore_t ldmSeqStore = kNullRawSeqStore;
+ RawSeqStore_t ldmSeqStore = kNullRawSeqStore;
+
+ /* External matchfinder + LDM is technically possible, just not implemented yet.
+ * We need to revisit soon and implement it. */
+ RETURN_ERROR_IF(
+ ZSTD_hasExtSeqProd(&zc->appliedParams),
+ parameter_combination_unsupported,
+ "Long-distance matching with external sequence producer enabled is not currently supported."
+ );
ldmSeqStore.seq = zc->ldmSequences;
ldmSeqStore.capacity = zc->maxNbLdmSequences;
@@ -2788,42 +3262,116 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
zc->appliedParams.useRowMatchFinder,
src, srcSize);
assert(ldmSeqStore.pos == ldmSeqStore.size);
- } else { /* not long range mode */
- ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy,
- zc->appliedParams.useRowMatchFinder,
- dictMode);
+ } else if (ZSTD_hasExtSeqProd(&zc->appliedParams)) {
+ assert(
+ zc->extSeqBufCapacity >= ZSTD_sequenceBound(srcSize)
+ );
+ assert(zc->appliedParams.extSeqProdFunc != NULL);
+
+ { U32 const windowSize = (U32)1 << zc->appliedParams.cParams.windowLog;
+
+ size_t const nbExternalSeqs = (zc->appliedParams.extSeqProdFunc)(
+ zc->appliedParams.extSeqProdState,
+ zc->extSeqBuf,
+ zc->extSeqBufCapacity,
+ src, srcSize,
+ NULL, 0, /* dict and dictSize, currently not supported */
+ zc->appliedParams.compressionLevel,
+ windowSize
+ );
+
+ size_t const nbPostProcessedSeqs = ZSTD_postProcessSequenceProducerResult(
+ zc->extSeqBuf,
+ nbExternalSeqs,
+ zc->extSeqBufCapacity,
+ srcSize
+ );
+
+ /* Return early if there is no error, since we don't need to worry about last literals */
+ if (!ZSTD_isError(nbPostProcessedSeqs)) {
+ ZSTD_SequencePosition seqPos = {0,0,0};
+ size_t const seqLenSum = ZSTD_fastSequenceLengthSum(zc->extSeqBuf, nbPostProcessedSeqs);
+ RETURN_ERROR_IF(seqLenSum > srcSize, externalSequences_invalid, "External sequences imply too large a block!");
+ FORWARD_IF_ERROR(
+ ZSTD_transferSequences_wBlockDelim(
+ zc, &seqPos,
+ zc->extSeqBuf, nbPostProcessedSeqs,
+ src, srcSize,
+ zc->appliedParams.searchForExternalRepcodes
+ ),
+ "Failed to copy external sequences to seqStore!"
+ );
+ ms->ldmSeqStore = NULL;
+ DEBUGLOG(5, "Copied %lu sequences from external sequence producer to internal seqStore.", (unsigned long)nbExternalSeqs);
+ return ZSTDbss_compress;
+ }
+
+ /* Propagate the error if fallback is disabled */
+ if (!zc->appliedParams.enableMatchFinderFallback) {
+ return nbPostProcessedSeqs;
+ }
+
+ /* Fallback to software matchfinder */
+ { ZSTD_BlockCompressor_f const blockCompressor =
+ ZSTD_selectBlockCompressor(
+ zc->appliedParams.cParams.strategy,
+ zc->appliedParams.useRowMatchFinder,
+ dictMode);
+ ms->ldmSeqStore = NULL;
+ DEBUGLOG(
+ 5,
+ "External sequence producer returned error code %lu. Falling back to internal parser.",
+ (unsigned long)nbExternalSeqs
+ );
+ lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize);
+ } }
+ } else { /* not long range mode and no external matchfinder */
+ ZSTD_BlockCompressor_f const blockCompressor = ZSTD_selectBlockCompressor(
+ zc->appliedParams.cParams.strategy,
+ zc->appliedParams.useRowMatchFinder,
+ dictMode);
ms->ldmSeqStore = NULL;
lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize);
}
{ const BYTE* const lastLiterals = (const BYTE*)src + srcSize - lastLLSize;
ZSTD_storeLastLiterals(&zc->seqStore, lastLiterals, lastLLSize);
} }
+ ZSTD_validateSeqStore(&zc->seqStore, &zc->appliedParams.cParams);
return ZSTDbss_compress;
}
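For readers unfamiliar with the external-producer path above, here is a minimal sketch of a producer callback following the same argument order as the call site; the struct layout and the trailing literals-only convention are assumptions based on the public zstd sequence-producer API, not part of this patch.

#include <stddef.h>

/* Illustrative mirror of the fields used above; not the real ZSTD_Sequence. */
typedef struct { unsigned offset, litLength, matchLength, rep; } ExampleSequence;

/* Simplest conceivable producer: one literals-only sequence covering the
 * whole block (matchLength == 0, offset == 0). Returning (size_t)-1 instead
 * would signal failure and trigger the fallback path shown above. */
static size_t exampleSeqProducer(void* state,
                                 ExampleSequence* outSeqs, size_t outSeqsCapacity,
                                 const void* src, size_t srcSize,
                                 const void* dict, size_t dictSize,
                                 int compressionLevel, size_t windowSize)
{
    (void)state; (void)src; (void)dict; (void)dictSize;
    (void)compressionLevel; (void)windowSize;
    if (outSeqsCapacity < 1) return (size_t)-1;
    outSeqs[0].offset = 0;
    outSeqs[0].litLength = (unsigned)srcSize;
    outSeqs[0].matchLength = 0;
    outSeqs[0].rep = 0;
    return 1;
}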
-static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc)
+static size_t ZSTD_copyBlockSequences(SeqCollector* seqCollector, const SeqStore_t* seqStore, const U32 prevRepcodes[ZSTD_REP_NUM])
{
- const seqStore_t* seqStore = ZSTD_getSeqStore(zc);
- const seqDef* seqStoreSeqs = seqStore->sequencesStart;
- size_t seqStoreSeqSize = seqStore->sequences - seqStoreSeqs;
- size_t seqStoreLiteralsSize = (size_t)(seqStore->lit - seqStore->litStart);
- size_t literalsRead = 0;
- size_t lastLLSize;
+ const SeqDef* inSeqs = seqStore->sequencesStart;
+ const size_t nbInSequences = (size_t)(seqStore->sequences - inSeqs);
+ const size_t nbInLiterals = (size_t)(seqStore->lit - seqStore->litStart);
- ZSTD_Sequence* outSeqs = &zc->seqCollector.seqStart[zc->seqCollector.seqIndex];
+ ZSTD_Sequence* outSeqs = seqCollector->seqIndex == 0 ? seqCollector->seqStart : seqCollector->seqStart + seqCollector->seqIndex;
+ const size_t nbOutSequences = nbInSequences + 1;
+ size_t nbOutLiterals = 0;
+ Repcodes_t repcodes;
size_t i;
- repcodes_t updatedRepcodes;
-
- assert(zc->seqCollector.seqIndex + 1 < zc->seqCollector.maxSequences);
- /* Ensure we have enough space for last literals "sequence" */
- assert(zc->seqCollector.maxSequences >= seqStoreSeqSize + 1);
- ZSTD_memcpy(updatedRepcodes.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t));
- for (i = 0; i < seqStoreSeqSize; ++i) {
- U32 rawOffset = seqStoreSeqs[i].offBase - ZSTD_REP_NUM;
- outSeqs[i].litLength = seqStoreSeqs[i].litLength;
- outSeqs[i].matchLength = seqStoreSeqs[i].mlBase + MINMATCH;
+
+ /* Bounds check that we have enough space for every input sequence
+ * and the block delimiter
+ */
+ assert(seqCollector->seqIndex <= seqCollector->maxSequences);
+ RETURN_ERROR_IF(
+ nbOutSequences > (size_t)(seqCollector->maxSequences - seqCollector->seqIndex),
+ dstSize_tooSmall,
+ "Not enough space to copy sequences");
+
+ ZSTD_memcpy(&repcodes, prevRepcodes, sizeof(repcodes));
+ for (i = 0; i < nbInSequences; ++i) {
+ U32 rawOffset;
+ outSeqs[i].litLength = inSeqs[i].litLength;
+ outSeqs[i].matchLength = inSeqs[i].mlBase + MINMATCH;
outSeqs[i].rep = 0;
+ /* Handle the possible single length >= 64K.
+ * There can be at most one, because we add MINMATCH to every match length,
+ * and blocks are at most 128K.
+ */
if (i == seqStore->longLengthPos) {
if (seqStore->longLengthType == ZSTD_llt_literalLength) {
outSeqs[i].litLength += 0x10000;
@@ -2832,46 +3380,75 @@ static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc)
}
}
- if (seqStoreSeqs[i].offBase <= ZSTD_REP_NUM) {
- /* Derive the correct offset corresponding to a repcode */
- outSeqs[i].rep = seqStoreSeqs[i].offBase;
+ /* Determine the raw offset given the offBase, which may be a repcode. */
+ if (OFFBASE_IS_REPCODE(inSeqs[i].offBase)) {
+ const U32 repcode = OFFBASE_TO_REPCODE(inSeqs[i].offBase);
+ assert(repcode > 0);
+ outSeqs[i].rep = repcode;
if (outSeqs[i].litLength != 0) {
- rawOffset = updatedRepcodes.rep[outSeqs[i].rep - 1];
+ rawOffset = repcodes.rep[repcode - 1];
} else {
- if (outSeqs[i].rep == 3) {
- rawOffset = updatedRepcodes.rep[0] - 1;
+ if (repcode == 3) {
+ assert(repcodes.rep[0] > 1);
+ rawOffset = repcodes.rep[0] - 1;
} else {
- rawOffset = updatedRepcodes.rep[outSeqs[i].rep];
+ rawOffset = repcodes.rep[repcode];
}
}
+ } else {
+ rawOffset = OFFBASE_TO_OFFSET(inSeqs[i].offBase);
}
outSeqs[i].offset = rawOffset;
- /* seqStoreSeqs[i].offset == offCode+1, and ZSTD_updateRep() expects offCode
- so we provide seqStoreSeqs[i].offset - 1 */
- ZSTD_updateRep(updatedRepcodes.rep,
- seqStoreSeqs[i].offBase - 1,
- seqStoreSeqs[i].litLength == 0);
- literalsRead += outSeqs[i].litLength;
+
+ /* Update repcode history for the sequence */
+ ZSTD_updateRep(repcodes.rep,
+ inSeqs[i].offBase,
+ inSeqs[i].litLength == 0);
+
+ nbOutLiterals += outSeqs[i].litLength;
}
/* Insert last literals (if any exist) in the block as a sequence with ml == off == 0.
* If there are no last literals, then we'll emit (of: 0, ml: 0, ll: 0), which is a marker
* for the block boundary, according to the API.
*/
- assert(seqStoreLiteralsSize >= literalsRead);
- lastLLSize = seqStoreLiteralsSize - literalsRead;
- outSeqs[i].litLength = (U32)lastLLSize;
- outSeqs[i].matchLength = outSeqs[i].offset = outSeqs[i].rep = 0;
- seqStoreSeqSize++;
- zc->seqCollector.seqIndex += seqStoreSeqSize;
+ assert(nbInLiterals >= nbOutLiterals);
+ {
+ const size_t lastLLSize = nbInLiterals - nbOutLiterals;
+ outSeqs[nbInSequences].litLength = (U32)lastLLSize;
+ outSeqs[nbInSequences].matchLength = 0;
+ outSeqs[nbInSequences].offset = 0;
+ assert(nbOutSequences == nbInSequences + 1);
+ }
+ seqCollector->seqIndex += nbOutSequences;
+ assert(seqCollector->seqIndex <= seqCollector->maxSequences);
+
+ return 0;
+}
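As a concrete illustration of the output layout built above (values invented): two ordinary sequences followed by the literals-only block delimiter appended at the end.

typedef struct { unsigned offset, litLength, matchLength, rep; } ExampleSequence;

static const ExampleSequence exampleBlock[] = {
    { 1024, 10,  7, 0 },  /* 10 literals, then a 7-byte match at offset 1024     */
    {   64,  0, 12, 1 },  /* repcode 1 resolved to raw offset 64, 12-byte match  */
    {    0,  5,  0, 0 },  /* delimiter carrying the 5 last literals of the block */
};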
+
+size_t ZSTD_sequenceBound(size_t srcSize) {
+ const size_t maxNbSeq = (srcSize / ZSTD_MINMATCH_MIN) + 1;
+ const size_t maxNbDelims = (srcSize / ZSTD_BLOCKSIZE_MAX_MIN) + 1;
+ return maxNbSeq + maxNbDelims;
}
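A worked instance of the bound above, assuming ZSTD_MINMATCH_MIN == 3 and ZSTD_BLOCKSIZE_MAX_MIN == 1 KB (neither constant is restated in this hunk):

/* srcSize = 128 KB = 131072:
 *   maxNbSeq    = 131072 / 3    + 1 = 43691
 *   maxNbDelims = 131072 / 1024 + 1 = 129
 *   bound       = 43691 + 129      = 43820 ZSTD_Sequence slots
 */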
size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
size_t outSeqsSize, const void* src, size_t srcSize)
{
const size_t dstCapacity = ZSTD_compressBound(srcSize);
- void* dst = ZSTD_customMalloc(dstCapacity, ZSTD_defaultCMem);
+ void* dst; /* Make C90 happy. */
SeqCollector seqCollector;
+ {
+ int targetCBlockSize;
+ FORWARD_IF_ERROR(ZSTD_CCtx_getParameter(zc, ZSTD_c_targetCBlockSize, &targetCBlockSize), "");
+ RETURN_ERROR_IF(targetCBlockSize != 0, parameter_unsupported, "targetCBlockSize != 0");
+ }
+ {
+ int nbWorkers;
+ FORWARD_IF_ERROR(ZSTD_CCtx_getParameter(zc, ZSTD_c_nbWorkers, &nbWorkers), "");
+ RETURN_ERROR_IF(nbWorkers != 0, parameter_unsupported, "nbWorkers != 0");
+ }
+ dst = ZSTD_customMalloc(dstCapacity, ZSTD_defaultCMem);
RETURN_ERROR_IF(dst == NULL, memory_allocation, "NULL pointer!");
seqCollector.collectSequences = 1;
@@ -2880,8 +3457,12 @@ size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
seqCollector.maxSequences = outSeqsSize;
zc->seqCollector = seqCollector;
- ZSTD_compress2(zc, dst, dstCapacity, src, srcSize);
- ZSTD_customFree(dst, ZSTD_defaultCMem);
+ {
+ const size_t ret = ZSTD_compress2(zc, dst, dstCapacity, src, srcSize);
+ ZSTD_customFree(dst, ZSTD_defaultCMem);
+ FORWARD_IF_ERROR(ret, "ZSTD_compress2 failed");
+ }
+ assert(zc->seqCollector.seqIndex <= ZSTD_sequenceBound(srcSize));
return zc->seqCollector.seqIndex;
}
@@ -2910,19 +3491,17 @@ static int ZSTD_isRLE(const BYTE* src, size_t length) {
const size_t unrollMask = unrollSize - 1;
const size_t prefixLength = length & unrollMask;
size_t i;
- size_t u;
if (length == 1) return 1;
/* Check if prefix is RLE first before using unrolled loop */
if (prefixLength && ZSTD_count(ip+1, ip, ip+prefixLength) != prefixLength-1) {
return 0;
}
for (i = prefixLength; i != length; i += unrollSize) {
+ size_t u;
for (u = 0; u < unrollSize; u += sizeof(size_t)) {
if (MEM_readST(ip + i + u) != valueST) {
return 0;
- }
- }
- }
+ } } }
return 1;
}
@@ -2930,7 +3509,7 @@ static int ZSTD_isRLE(const BYTE* src, size_t length) {
* This is just a heuristic based on the compressibility.
* It may return both false positives and false negatives.
*/
-static int ZSTD_maybeRLE(seqStore_t const* seqStore)
+static int ZSTD_maybeRLE(SeqStore_t const* seqStore)
{
size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart);
size_t const nbLits = (size_t)(seqStore->lit - seqStore->litStart);
@@ -2938,7 +3517,8 @@ static int ZSTD_maybeRLE(seqStore_t const* seqStore)
return nbSeqs < 4 && nbLits < 10;
}
-static void ZSTD_blockState_confirmRepcodesAndEntropyTables(ZSTD_blockState_t* const bs)
+static void
+ZSTD_blockState_confirmRepcodesAndEntropyTables(ZSTD_blockState_t* const bs)
{
ZSTD_compressedBlockState_t* const tmp = bs->prevCBlock;
bs->prevCBlock = bs->nextCBlock;
@@ -2946,12 +3526,14 @@ static void ZSTD_blockState_confirmRepcodesAndEntropyTables(ZSTD_blockState_t* c
}
/* Writes the block header */
-static void writeBlockHeader(void* op, size_t cSize, size_t blockSize, U32 lastBlock) {
+static void
+writeBlockHeader(void* op, size_t cSize, size_t blockSize, U32 lastBlock)
+{
U32 const cBlockHeader = cSize == 1 ?
lastBlock + (((U32)bt_rle)<<1) + (U32)(blockSize << 3) :
lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
MEM_writeLE24(op, cBlockHeader);
- DEBUGLOG(3, "writeBlockHeader: cSize: %zu blockSize: %zu lastBlock: %u", cSize, blockSize, lastBlock);
+ DEBUGLOG(5, "writeBlockHeader: cSize: %zu blockSize: %zu lastBlock: %u", cSize, blockSize, lastBlock);
}
/* ZSTD_buildBlockEntropyStats_literals() :
@@ -2959,13 +3541,16 @@ static void writeBlockHeader(void* op, size_t cSize, size_t blockSize, U32 lastB
* Stores literals block type (raw, rle, compressed, repeat) and
* huffman description table to hufMetadata.
* Requires ENTROPY_WORKSPACE_SIZE workspace
- * @return : size of huffman description table or error code */
-static size_t ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSize,
- const ZSTD_hufCTables_t* prevHuf,
- ZSTD_hufCTables_t* nextHuf,
- ZSTD_hufCTablesMetadata_t* hufMetadata,
- const int literalsCompressionIsDisabled,
- void* workspace, size_t wkspSize)
+ * @return : size of huffman description table, or an error code
+ */
+static size_t
+ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSize,
+ const ZSTD_hufCTables_t* prevHuf,
+ ZSTD_hufCTables_t* nextHuf,
+ ZSTD_hufCTablesMetadata_t* hufMetadata,
+ const int literalsCompressionIsDisabled,
+ void* workspace, size_t wkspSize,
+ int hufFlags)
{
BYTE* const wkspStart = (BYTE*)workspace;
BYTE* const wkspEnd = wkspStart + wkspSize;
@@ -2973,9 +3558,9 @@ static size_t ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSi
unsigned* const countWksp = (unsigned*)workspace;
const size_t countWkspSize = (HUF_SYMBOLVALUE_MAX + 1) * sizeof(unsigned);
BYTE* const nodeWksp = countWkspStart + countWkspSize;
- const size_t nodeWkspSize = wkspEnd-nodeWksp;
+ const size_t nodeWkspSize = (size_t)(wkspEnd - nodeWksp);
unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX;
- unsigned huffLog = HUF_TABLELOG_DEFAULT;
+ unsigned huffLog = LitHufLog;
HUF_repeat repeat = prevHuf->repeatMode;
DEBUGLOG(5, "ZSTD_buildBlockEntropyStats_literals (srcSize=%zu)", srcSize);
@@ -2990,73 +3575,77 @@ static size_t ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSi
/* small ? don't even attempt compression (speed opt) */
#ifndef COMPRESS_LITERALS_SIZE_MIN
-#define COMPRESS_LITERALS_SIZE_MIN 63
+# define COMPRESS_LITERALS_SIZE_MIN 63 /* heuristic */
#endif
{ size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
if (srcSize <= minLitSize) {
DEBUGLOG(5, "set_basic - too small");
hufMetadata->hType = set_basic;
return 0;
- }
- }
+ } }
/* Scan input and build symbol stats */
- { size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)src, srcSize, workspace, wkspSize);
+ { size_t const largest =
+ HIST_count_wksp (countWksp, &maxSymbolValue,
+ (const BYTE*)src, srcSize,
+ workspace, wkspSize);
FORWARD_IF_ERROR(largest, "HIST_count_wksp failed");
if (largest == srcSize) {
+ /* only one literal symbol */
DEBUGLOG(5, "set_rle");
hufMetadata->hType = set_rle;
return 0;
}
if (largest <= (srcSize >> 7)+4) {
+ /* heuristic: likely not compressible */
DEBUGLOG(5, "set_basic - no gain");
hufMetadata->hType = set_basic;
return 0;
- }
- }
+ } }
/* Validate the previous Huffman table */
- if (repeat == HUF_repeat_check && !HUF_validateCTable((HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue)) {
+ if (repeat == HUF_repeat_check
+ && !HUF_validateCTable((HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue)) {
repeat = HUF_repeat_none;
}
/* Build Huffman Tree */
ZSTD_memset(nextHuf->CTable, 0, sizeof(nextHuf->CTable));
- huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
+ huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue, nodeWksp, nodeWkspSize, nextHuf->CTable, countWksp, hufFlags);
+ assert(huffLog <= LitHufLog);
{ size_t const maxBits = HUF_buildCTable_wksp((HUF_CElt*)nextHuf->CTable, countWksp,
maxSymbolValue, huffLog,
nodeWksp, nodeWkspSize);
FORWARD_IF_ERROR(maxBits, "HUF_buildCTable_wksp");
huffLog = (U32)maxBits;
- { /* Build and write the CTable */
- size_t const newCSize = HUF_estimateCompressedSize(
- (HUF_CElt*)nextHuf->CTable, countWksp, maxSymbolValue);
- size_t const hSize = HUF_writeCTable_wksp(
- hufMetadata->hufDesBuffer, sizeof(hufMetadata->hufDesBuffer),
- (HUF_CElt*)nextHuf->CTable, maxSymbolValue, huffLog,
- nodeWksp, nodeWkspSize);
- /* Check against repeating the previous CTable */
- if (repeat != HUF_repeat_none) {
- size_t const oldCSize = HUF_estimateCompressedSize(
- (HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue);
- if (oldCSize < srcSize && (oldCSize <= hSize + newCSize || hSize + 12 >= srcSize)) {
- DEBUGLOG(5, "set_repeat - smaller");
- ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
- hufMetadata->hType = set_repeat;
- return 0;
- }
- }
- if (newCSize + hSize >= srcSize) {
- DEBUGLOG(5, "set_basic - no gains");
+ }
+ { /* Build and write the CTable */
+ size_t const newCSize = HUF_estimateCompressedSize(
+ (HUF_CElt*)nextHuf->CTable, countWksp, maxSymbolValue);
+ size_t const hSize = HUF_writeCTable_wksp(
+ hufMetadata->hufDesBuffer, sizeof(hufMetadata->hufDesBuffer),
+ (HUF_CElt*)nextHuf->CTable, maxSymbolValue, huffLog,
+ nodeWksp, nodeWkspSize);
+ /* Check against repeating the previous CTable */
+ if (repeat != HUF_repeat_none) {
+ size_t const oldCSize = HUF_estimateCompressedSize(
+ (HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue);
+ if (oldCSize < srcSize && (oldCSize <= hSize + newCSize || hSize + 12 >= srcSize)) {
+ DEBUGLOG(5, "set_repeat - smaller");
ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
- hufMetadata->hType = set_basic;
+ hufMetadata->hType = set_repeat;
return 0;
- }
- DEBUGLOG(5, "set_compressed (hSize=%u)", (U32)hSize);
- hufMetadata->hType = set_compressed;
- nextHuf->repeatMode = HUF_repeat_check;
- return hSize;
+ } }
+ if (newCSize + hSize >= srcSize) {
+ DEBUGLOG(5, "set_basic - no gains");
+ ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
+ hufMetadata->hType = set_basic;
+ return 0;
}
+ DEBUGLOG(5, "set_compressed (hSize=%u)", (U32)hSize);
+ hufMetadata->hType = set_compressed;
+ nextHuf->repeatMode = HUF_repeat_check;
+ return hSize;
}
}
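A compact paraphrase of the literals decision ladder implemented above, for orientation only:

/* srcSize <= minLitSize (6 or 63)          -> set_basic   (too small to bother)
 * a single distinct symbol                 -> set_rle
 * max count <= srcSize/128 + 4             -> set_basic   (likely incompressible)
 * previous CTable estimated cheaper        -> set_repeat
 * new CTable + payload >= srcSize          -> set_basic   (no gain)
 * otherwise                                -> set_compressed, returns hSize
 */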
@@ -3066,8 +3655,9 @@ static size_t ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSi
* and updates nextEntropy to the appropriate repeatMode.
*/
static ZSTD_symbolEncodingTypeStats_t
-ZSTD_buildDummySequencesStatistics(ZSTD_fseCTables_t* nextEntropy) {
- ZSTD_symbolEncodingTypeStats_t stats = {set_basic, set_basic, set_basic, 0, 0};
+ZSTD_buildDummySequencesStatistics(ZSTD_fseCTables_t* nextEntropy)
+{
+ ZSTD_symbolEncodingTypeStats_t stats = {set_basic, set_basic, set_basic, 0, 0, 0};
nextEntropy->litlength_repeatMode = FSE_repeat_none;
nextEntropy->offcode_repeatMode = FSE_repeat_none;
nextEntropy->matchlength_repeatMode = FSE_repeat_none;
@@ -3078,16 +3668,18 @@ ZSTD_buildDummySequencesStatistics(ZSTD_fseCTables_t* nextEntropy) {
* Builds entropy for the sequences.
* Stores symbol compression modes and fse table to fseMetadata.
* Requires ENTROPY_WORKSPACE_SIZE wksp.
- * @return : size of fse tables or error code */
-static size_t ZSTD_buildBlockEntropyStats_sequences(seqStore_t* seqStorePtr,
- const ZSTD_fseCTables_t* prevEntropy,
- ZSTD_fseCTables_t* nextEntropy,
- const ZSTD_CCtx_params* cctxParams,
- ZSTD_fseCTablesMetadata_t* fseMetadata,
- void* workspace, size_t wkspSize)
+ * @return : size of fse tables or error code */
+static size_t
+ZSTD_buildBlockEntropyStats_sequences(
+ const SeqStore_t* seqStorePtr,
+ const ZSTD_fseCTables_t* prevEntropy,
+ ZSTD_fseCTables_t* nextEntropy,
+ const ZSTD_CCtx_params* cctxParams,
+ ZSTD_fseCTablesMetadata_t* fseMetadata,
+ void* workspace, size_t wkspSize)
{
ZSTD_strategy const strategy = cctxParams->cParams.strategy;
- size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
+ size_t const nbSeq = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
BYTE* const ostart = fseMetadata->fseTablesBuffer;
BYTE* const oend = ostart + sizeof(fseMetadata->fseTablesBuffer);
BYTE* op = ostart;
@@ -3103,9 +3695,9 @@ static size_t ZSTD_buildBlockEntropyStats_sequences(seqStore_t* seqStorePtr,
entropyWorkspace, entropyWorkspaceSize)
: ZSTD_buildDummySequencesStatistics(nextEntropy);
FORWARD_IF_ERROR(stats.size, "ZSTD_buildSequencesStatistics failed!");
- fseMetadata->llType = (symbolEncodingType_e) stats.LLtype;
- fseMetadata->ofType = (symbolEncodingType_e) stats.Offtype;
- fseMetadata->mlType = (symbolEncodingType_e) stats.MLtype;
+ fseMetadata->llType = (SymbolEncodingType_e) stats.LLtype;
+ fseMetadata->ofType = (SymbolEncodingType_e) stats.Offtype;
+ fseMetadata->mlType = (SymbolEncodingType_e) stats.MLtype;
fseMetadata->lastCountSize = stats.lastCountSize;
return stats.size;
}
@@ -3114,23 +3706,28 @@ static size_t ZSTD_buildBlockEntropyStats_sequences(seqStore_t* seqStorePtr,
/* ZSTD_buildBlockEntropyStats() :
* Builds entropy for the block.
* Requires workspace size ENTROPY_WORKSPACE_SIZE
- *
- * @return : 0 on success or error code
+ * @return : 0 on success, or an error code
+ * Note : also employed in superblock
*/
-size_t ZSTD_buildBlockEntropyStats(seqStore_t* seqStorePtr,
- const ZSTD_entropyCTables_t* prevEntropy,
- ZSTD_entropyCTables_t* nextEntropy,
- const ZSTD_CCtx_params* cctxParams,
- ZSTD_entropyCTablesMetadata_t* entropyMetadata,
- void* workspace, size_t wkspSize)
-{
- size_t const litSize = seqStorePtr->lit - seqStorePtr->litStart;
+size_t ZSTD_buildBlockEntropyStats(
+ const SeqStore_t* seqStorePtr,
+ const ZSTD_entropyCTables_t* prevEntropy,
+ ZSTD_entropyCTables_t* nextEntropy,
+ const ZSTD_CCtx_params* cctxParams,
+ ZSTD_entropyCTablesMetadata_t* entropyMetadata,
+ void* workspace, size_t wkspSize)
+{
+ size_t const litSize = (size_t)(seqStorePtr->lit - seqStorePtr->litStart);
+ int const huf_useOptDepth = (cctxParams->cParams.strategy >= HUF_OPTIMAL_DEPTH_THRESHOLD);
+ int const hufFlags = huf_useOptDepth ? HUF_flags_optimalDepth : 0;
+
entropyMetadata->hufMetadata.hufDesSize =
ZSTD_buildBlockEntropyStats_literals(seqStorePtr->litStart, litSize,
&prevEntropy->huf, &nextEntropy->huf,
&entropyMetadata->hufMetadata,
ZSTD_literalsCompressionIsDisabled(cctxParams),
- workspace, wkspSize);
+ workspace, wkspSize, hufFlags);
+
FORWARD_IF_ERROR(entropyMetadata->hufMetadata.hufDesSize, "ZSTD_buildBlockEntropyStats_literals failed");
entropyMetadata->fseMetadata.fseTablesSize =
ZSTD_buildBlockEntropyStats_sequences(seqStorePtr,
@@ -3143,11 +3740,12 @@ size_t ZSTD_buildBlockEntropyStats(seqStore_t* seqStorePtr,
}
/* Returns the size estimate for the literals section (header + content) of a block */
-static size_t ZSTD_estimateBlockSize_literal(const BYTE* literals, size_t litSize,
- const ZSTD_hufCTables_t* huf,
- const ZSTD_hufCTablesMetadata_t* hufMetadata,
- void* workspace, size_t wkspSize,
- int writeEntropy)
+static size_t
+ZSTD_estimateBlockSize_literal(const BYTE* literals, size_t litSize,
+ const ZSTD_hufCTables_t* huf,
+ const ZSTD_hufCTablesMetadata_t* hufMetadata,
+ void* workspace, size_t wkspSize,
+ int writeEntropy)
{
unsigned* const countWksp = (unsigned*)workspace;
unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX;
@@ -3169,12 +3767,13 @@ static size_t ZSTD_estimateBlockSize_literal(const BYTE* literals, size_t litSiz
}
/* Returns the size estimate for the FSE-compressed symbols (of, ml, ll) of a block */
-static size_t ZSTD_estimateBlockSize_symbolType(symbolEncodingType_e type,
- const BYTE* codeTable, size_t nbSeq, unsigned maxCode,
- const FSE_CTable* fseCTable,
- const U8* additionalBits,
- short const* defaultNorm, U32 defaultNormLog, U32 defaultMax,
- void* workspace, size_t wkspSize)
+static size_t
+ZSTD_estimateBlockSize_symbolType(SymbolEncodingType_e type,
+ const BYTE* codeTable, size_t nbSeq, unsigned maxCode,
+ const FSE_CTable* fseCTable,
+ const U8* additionalBits,
+ short const* defaultNorm, U32 defaultNormLog, U32 defaultMax,
+ void* workspace, size_t wkspSize)
{
unsigned* const countWksp = (unsigned*)workspace;
const BYTE* ctp = codeTable;
@@ -3206,116 +3805,121 @@ static size_t ZSTD_estimateBlockSize_symbolType(symbolEncodingType_e type,
}
/* Returns the size estimate for the sequences section (header + content) of a block */
-static size_t ZSTD_estimateBlockSize_sequences(const BYTE* ofCodeTable,
- const BYTE* llCodeTable,
- const BYTE* mlCodeTable,
- size_t nbSeq,
- const ZSTD_fseCTables_t* fseTables,
- const ZSTD_fseCTablesMetadata_t* fseMetadata,
- void* workspace, size_t wkspSize,
- int writeEntropy)
+static size_t
+ZSTD_estimateBlockSize_sequences(const BYTE* ofCodeTable,
+ const BYTE* llCodeTable,
+ const BYTE* mlCodeTable,
+ size_t nbSeq,
+ const ZSTD_fseCTables_t* fseTables,
+ const ZSTD_fseCTablesMetadata_t* fseMetadata,
+ void* workspace, size_t wkspSize,
+ int writeEntropy)
{
size_t sequencesSectionHeaderSize = 1 /* seqHead */ + 1 /* min seqSize size */ + (nbSeq >= 128) + (nbSeq >= LONGNBSEQ);
size_t cSeqSizeEstimate = 0;
cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->ofType, ofCodeTable, nbSeq, MaxOff,
- fseTables->offcodeCTable, NULL,
- OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
- workspace, wkspSize);
+ fseTables->offcodeCTable, NULL,
+ OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
+ workspace, wkspSize);
cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->llType, llCodeTable, nbSeq, MaxLL,
- fseTables->litlengthCTable, LL_bits,
- LL_defaultNorm, LL_defaultNormLog, MaxLL,
- workspace, wkspSize);
+ fseTables->litlengthCTable, LL_bits,
+ LL_defaultNorm, LL_defaultNormLog, MaxLL,
+ workspace, wkspSize);
cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->mlType, mlCodeTable, nbSeq, MaxML,
- fseTables->matchlengthCTable, ML_bits,
- ML_defaultNorm, ML_defaultNormLog, MaxML,
- workspace, wkspSize);
+ fseTables->matchlengthCTable, ML_bits,
+ ML_defaultNorm, ML_defaultNormLog, MaxML,
+ workspace, wkspSize);
if (writeEntropy) cSeqSizeEstimate += fseMetadata->fseTablesSize;
return cSeqSizeEstimate + sequencesSectionHeaderSize;
}
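A worked example of the header-size term above (the exact LONGNBSEQ threshold is not restated here; both example counts are assumed to be below it):

/* sequencesSectionHeaderSize = 1 (seqHead) + 1 + (nbSeq >= 128) + (nbSeq >= LONGNBSEQ)
 *   nbSeq =  90 -> 1 + 1 + 0 + 0 = 2 bytes
 *   nbSeq = 200 -> 1 + 1 + 1 + 0 = 3 bytes
 */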
/* Returns the size estimate for a given stream of literals, of, ll, ml */
-static size_t ZSTD_estimateBlockSize(const BYTE* literals, size_t litSize,
- const BYTE* ofCodeTable,
- const BYTE* llCodeTable,
- const BYTE* mlCodeTable,
- size_t nbSeq,
- const ZSTD_entropyCTables_t* entropy,
- const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
- void* workspace, size_t wkspSize,
- int writeLitEntropy, int writeSeqEntropy) {
+static size_t
+ZSTD_estimateBlockSize(const BYTE* literals, size_t litSize,
+ const BYTE* ofCodeTable,
+ const BYTE* llCodeTable,
+ const BYTE* mlCodeTable,
+ size_t nbSeq,
+ const ZSTD_entropyCTables_t* entropy,
+ const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
+ void* workspace, size_t wkspSize,
+ int writeLitEntropy, int writeSeqEntropy)
+{
size_t const literalsSize = ZSTD_estimateBlockSize_literal(literals, litSize,
- &entropy->huf, &entropyMetadata->hufMetadata,
- workspace, wkspSize, writeLitEntropy);
+ &entropy->huf, &entropyMetadata->hufMetadata,
+ workspace, wkspSize, writeLitEntropy);
size_t const seqSize = ZSTD_estimateBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable,
- nbSeq, &entropy->fse, &entropyMetadata->fseMetadata,
- workspace, wkspSize, writeSeqEntropy);
+ nbSeq, &entropy->fse, &entropyMetadata->fseMetadata,
+ workspace, wkspSize, writeSeqEntropy);
return seqSize + literalsSize + ZSTD_blockHeaderSize;
}
/* Builds entropy statistics and uses them for blocksize estimation.
*
- * Returns the estimated compressed size of the seqStore, or a zstd error.
+ * @return: estimated compressed size of the seqStore, or a zstd error.
*/
-static size_t ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(seqStore_t* seqStore, ZSTD_CCtx* zc) {
- ZSTD_entropyCTablesMetadata_t* entropyMetadata = &zc->blockSplitCtx.entropyMetadata;
+static size_t
+ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(SeqStore_t* seqStore, ZSTD_CCtx* zc)
+{
+ ZSTD_entropyCTablesMetadata_t* const entropyMetadata = &zc->blockSplitCtx.entropyMetadata;
DEBUGLOG(6, "ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize()");
FORWARD_IF_ERROR(ZSTD_buildBlockEntropyStats(seqStore,
&zc->blockState.prevCBlock->entropy,
&zc->blockState.nextCBlock->entropy,
&zc->appliedParams,
entropyMetadata,
- zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */), "");
- return ZSTD_estimateBlockSize(seqStore->litStart, (size_t)(seqStore->lit - seqStore->litStart),
+ zc->tmpWorkspace, zc->tmpWkspSize), "");
+ return ZSTD_estimateBlockSize(
+ seqStore->litStart, (size_t)(seqStore->lit - seqStore->litStart),
seqStore->ofCode, seqStore->llCode, seqStore->mlCode,
(size_t)(seqStore->sequences - seqStore->sequencesStart),
- &zc->blockState.nextCBlock->entropy, entropyMetadata, zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE,
+ &zc->blockState.nextCBlock->entropy,
+ entropyMetadata,
+ zc->tmpWorkspace, zc->tmpWkspSize,
(int)(entropyMetadata->hufMetadata.hType == set_compressed), 1);
}
/* Returns literals bytes represented in a seqStore */
-static size_t ZSTD_countSeqStoreLiteralsBytes(const seqStore_t* const seqStore) {
+static size_t ZSTD_countSeqStoreLiteralsBytes(const SeqStore_t* const seqStore)
+{
size_t literalsBytes = 0;
- size_t const nbSeqs = seqStore->sequences - seqStore->sequencesStart;
+ size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart);
size_t i;
for (i = 0; i < nbSeqs; ++i) {
- seqDef seq = seqStore->sequencesStart[i];
+ SeqDef const seq = seqStore->sequencesStart[i];
literalsBytes += seq.litLength;
if (i == seqStore->longLengthPos && seqStore->longLengthType == ZSTD_llt_literalLength) {
literalsBytes += 0x10000;
- }
- }
+ } }
return literalsBytes;
}
/* Returns match bytes represented in a seqStore */
-static size_t ZSTD_countSeqStoreMatchBytes(const seqStore_t* const seqStore) {
+static size_t ZSTD_countSeqStoreMatchBytes(const SeqStore_t* const seqStore)
+{
size_t matchBytes = 0;
- size_t const nbSeqs = seqStore->sequences - seqStore->sequencesStart;
+ size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart);
size_t i;
for (i = 0; i < nbSeqs; ++i) {
- seqDef seq = seqStore->sequencesStart[i];
+ SeqDef seq = seqStore->sequencesStart[i];
matchBytes += seq.mlBase + MINMATCH;
if (i == seqStore->longLengthPos && seqStore->longLengthType == ZSTD_llt_matchLength) {
matchBytes += 0x10000;
- }
- }
+ } }
return matchBytes;
}
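Background for the 0x10000 corrections in the two helpers above, stated here as an assumption about the stored sequence layout rather than something visible in this hunk:

/* Stored sequences keep litLength and mlBase in 16-bit fields, so at most one
 * length per block can exceed 64 KB. The seqStore remembers which sequence it
 * is (longLengthPos) and which field overflowed (longLengthType); readers add
 * the missing 0x10000 back, as done above. */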
/* Derives the seqStore that is a chunk of the originalSeqStore from [startIdx, endIdx).
* Stores the result in resultSeqStore.
*/
-static void ZSTD_deriveSeqStoreChunk(seqStore_t* resultSeqStore,
- const seqStore_t* originalSeqStore,
- size_t startIdx, size_t endIdx) {
- BYTE* const litEnd = originalSeqStore->lit;
- size_t literalsBytes;
- size_t literalsBytesPreceding = 0;
-
+static void ZSTD_deriveSeqStoreChunk(SeqStore_t* resultSeqStore,
+ const SeqStore_t* originalSeqStore,
+ size_t startIdx, size_t endIdx)
+{
*resultSeqStore = *originalSeqStore;
if (startIdx > 0) {
resultSeqStore->sequences = originalSeqStore->sequencesStart + startIdx;
- literalsBytesPreceding = ZSTD_countSeqStoreLiteralsBytes(resultSeqStore);
+ resultSeqStore->litStart += ZSTD_countSeqStoreLiteralsBytes(resultSeqStore);
}
/* Move longLengthPos into the correct position if necessary */
@@ -3328,13 +3932,12 @@ static void ZSTD_deriveSeqStoreChunk(seqStore_t* resultSeqStore,
}
resultSeqStore->sequencesStart = originalSeqStore->sequencesStart + startIdx;
resultSeqStore->sequences = originalSeqStore->sequencesStart + endIdx;
- literalsBytes = ZSTD_countSeqStoreLiteralsBytes(resultSeqStore);
- resultSeqStore->litStart += literalsBytesPreceding;
if (endIdx == (size_t)(originalSeqStore->sequences - originalSeqStore->sequencesStart)) {
/* This accounts for possible last literals if the derived chunk reaches the end of the block */
- resultSeqStore->lit = litEnd;
+ assert(resultSeqStore->lit == originalSeqStore->lit);
} else {
- resultSeqStore->lit = resultSeqStore->litStart+literalsBytes;
+ size_t const literalsBytes = ZSTD_countSeqStoreLiteralsBytes(resultSeqStore);
+ resultSeqStore->lit = resultSeqStore->litStart + literalsBytes;
}
resultSeqStore->llCode += startIdx;
resultSeqStore->mlCode += startIdx;
@@ -3342,20 +3945,26 @@ static void ZSTD_deriveSeqStoreChunk(seqStore_t* resultSeqStore,
}
/*
- * Returns the raw offset represented by the combination of offCode, ll0, and repcode history.
- * offCode must represent a repcode in the numeric representation of ZSTD_storeSeq().
+ * Returns the raw offset represented by the combination of offBase, ll0, and repcode history.
+ * offBase must represent a repcode in the numeric representation of ZSTD_storeSeq().
*/
static U32
-ZSTD_resolveRepcodeToRawOffset(const U32 rep[ZSTD_REP_NUM], const U32 offCode, const U32 ll0)
-{
- U32 const adjustedOffCode = STORED_REPCODE(offCode) - 1 + ll0; /* [ 0 - 3 ] */
- assert(STORED_IS_REPCODE(offCode));
- if (adjustedOffCode == ZSTD_REP_NUM) {
- /* litlength == 0 and offCode == 2 implies selection of first repcode - 1 */
- assert(rep[0] > 0);
+ZSTD_resolveRepcodeToRawOffset(const U32 rep[ZSTD_REP_NUM], const U32 offBase, const U32 ll0)
+{
+ U32 const adjustedRepCode = OFFBASE_TO_REPCODE(offBase) - 1 + ll0; /* [ 0 - 3 ] */
+ assert(OFFBASE_IS_REPCODE(offBase));
+ if (adjustedRepCode == ZSTD_REP_NUM) {
+ assert(ll0);
+ /* litlength == 0 and offCode == 2 implies selection of first repcode - 1
+ * This is only valid if it results in a valid offset value, i.e. > 0.
+ * Note : it may happen that `rep[0]==1` in exceptional circumstances,
+ * in which case this function will return 0, which is an invalid offset.
+ * That's not an issue, though, since the value will be
+ * compared and discarded within ZSTD_seqStore_resolveOffCodes().
+ */
return rep[0] - 1;
}
- return rep[adjustedOffCode];
+ return rep[adjustedRepCode];
}
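A standalone sketch of the resolution rule above, with a worked example using an invented repcode history:

#include <assert.h>

/* history rep[] = {100, 200, 300}:
 *   repcode 1, ll0 == 0 -> adjusted 0 -> 100
 *   repcode 1, ll0 == 1 -> adjusted 1 -> 200
 *   repcode 3, ll0 == 1 -> adjusted 3 -> rep[0] - 1 = 99
 */
static unsigned resolveRepcodeSketch(const unsigned rep[3], unsigned repcode, unsigned ll0)
{
    unsigned const adjusted = repcode - 1 + ll0;  /* 0..3 */
    assert(1 <= repcode && repcode <= 3);
    return (adjusted == 3) ? rep[0] - 1 : rep[adjusted];
}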
/*
@@ -3371,30 +3980,33 @@ ZSTD_resolveRepcodeToRawOffset(const U32 rep[ZSTD_REP_NUM], const U32 offCode, c
* 1-3 : repcode 1-3
* 4+ : real_offset+3
*/
-static void ZSTD_seqStore_resolveOffCodes(repcodes_t* const dRepcodes, repcodes_t* const cRepcodes,
- seqStore_t* const seqStore, U32 const nbSeq) {
+static void
+ZSTD_seqStore_resolveOffCodes(Repcodes_t* const dRepcodes, Repcodes_t* const cRepcodes,
+ const SeqStore_t* const seqStore, U32 const nbSeq)
+{
U32 idx = 0;
+ U32 const longLitLenIdx = seqStore->longLengthType == ZSTD_llt_literalLength ? seqStore->longLengthPos : nbSeq;
for (; idx < nbSeq; ++idx) {
- seqDef* const seq = seqStore->sequencesStart + idx;
- U32 const ll0 = (seq->litLength == 0);
- U32 const offCode = OFFBASE_TO_STORED(seq->offBase);
- assert(seq->offBase > 0);
- if (STORED_IS_REPCODE(offCode)) {
- U32 const dRawOffset = ZSTD_resolveRepcodeToRawOffset(dRepcodes->rep, offCode, ll0);
- U32 const cRawOffset = ZSTD_resolveRepcodeToRawOffset(cRepcodes->rep, offCode, ll0);
+ SeqDef* const seq = seqStore->sequencesStart + idx;
+ U32 const ll0 = (seq->litLength == 0) && (idx != longLitLenIdx);
+ U32 const offBase = seq->offBase;
+ assert(offBase > 0);
+ if (OFFBASE_IS_REPCODE(offBase)) {
+ U32 const dRawOffset = ZSTD_resolveRepcodeToRawOffset(dRepcodes->rep, offBase, ll0);
+ U32 const cRawOffset = ZSTD_resolveRepcodeToRawOffset(cRepcodes->rep, offBase, ll0);
/* Adjust simulated decompression repcode history if we come across a mismatch. Replace
* the repcode with the offset it actually references, determined by the compression
* repcode history.
*/
if (dRawOffset != cRawOffset) {
- seq->offBase = cRawOffset + ZSTD_REP_NUM;
+ seq->offBase = OFFSET_TO_OFFBASE(cRawOffset);
}
}
/* Compression repcode history is always updated with values directly from the unmodified seqStore.
* Decompression repcode history may use modified seq->offset value taken from compression repcode history.
*/
- ZSTD_updateRep(dRepcodes->rep, OFFBASE_TO_STORED(seq->offBase), ll0);
- ZSTD_updateRep(cRepcodes->rep, offCode, ll0);
+ ZSTD_updateRep(dRepcodes->rep, seq->offBase, ll0);
+ ZSTD_updateRep(cRepcodes->rep, offBase, ll0);
}
}
@@ -3404,10 +4016,11 @@ static void ZSTD_seqStore_resolveOffCodes(repcodes_t* const dRepcodes, repcodes_
* Returns the total size of that block (including header) or a ZSTD error code.
*/
static size_t
-ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, seqStore_t* const seqStore,
- repcodes_t* const dRep, repcodes_t* const cRep,
+ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc,
+ const SeqStore_t* const seqStore,
+ Repcodes_t* const dRep, Repcodes_t* const cRep,
void* dst, size_t dstCapacity,
- const void* src, size_t srcSize,
+ const void* src, size_t srcSize,
U32 lastBlock, U32 isPartition)
{
const U32 rleMaxLength = 25;
@@ -3417,7 +4030,7 @@ ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, seqStore_t* const seqStore,
size_t cSeqsSize;
/* In case of an RLE or raw block, the simulated decompression repcode history must be reset */
- repcodes_t const dRepOriginal = *dRep;
+ Repcodes_t const dRepOriginal = *dRep;
DEBUGLOG(5, "ZSTD_compressSeqStore_singleBlock");
if (isPartition)
ZSTD_seqStore_resolveOffCodes(dRep, cRep, seqStore, (U32)(seqStore->sequences - seqStore->sequencesStart));
@@ -3428,7 +4041,7 @@ ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, seqStore_t* const seqStore,
&zc->appliedParams,
op + ZSTD_blockHeaderSize, dstCapacity - ZSTD_blockHeaderSize,
srcSize,
- zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
+ zc->tmpWorkspace, zc->tmpWkspSize /* statically allocated in resetCCtx */,
zc->bmi2);
FORWARD_IF_ERROR(cSeqsSize, "ZSTD_entropyCompressSeqStore failed!");
@@ -3442,8 +4055,9 @@ ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, seqStore_t* const seqStore,
cSeqsSize = 1;
}
+ /* Sequence collection not supported when block splitting */
if (zc->seqCollector.collectSequences) {
- ZSTD_copyBlockSequences(zc);
+ FORWARD_IF_ERROR(ZSTD_copyBlockSequences(&zc->seqCollector, seqStore, dRepOriginal.rep), "copyBlockSequences failed");
ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
return 0;
}
@@ -3451,18 +4065,18 @@ ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, seqStore_t* const seqStore,
if (cSeqsSize == 0) {
cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, srcSize, lastBlock);
FORWARD_IF_ERROR(cSize, "Nocompress block failed");
- DEBUGLOG(4, "Writing out nocompress block, size: %zu", cSize);
+ DEBUGLOG(5, "Writing out nocompress block, size: %zu", cSize);
*dRep = dRepOriginal; /* reset simulated decompression repcode history */
} else if (cSeqsSize == 1) {
cSize = ZSTD_rleCompressBlock(op, dstCapacity, *ip, srcSize, lastBlock);
FORWARD_IF_ERROR(cSize, "RLE compress block failed");
- DEBUGLOG(4, "Writing out RLE block, size: %zu", cSize);
+ DEBUGLOG(5, "Writing out RLE block, size: %zu", cSize);
*dRep = dRepOriginal; /* reset simulated decompression repcode history */
} else {
ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
writeBlockHeader(op, cSeqsSize, srcSize, lastBlock);
cSize = ZSTD_blockHeaderSize + cSeqsSize;
- DEBUGLOG(4, "Writing out compressed block, size: %zu", cSize);
+ DEBUGLOG(5, "Writing out compressed block, size: %zu", cSize);
}
if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
@@ -3481,45 +4095,49 @@ typedef struct {
/* Helper function to perform the recursive search for block splits.
* Estimates the cost of seqStore prior to split, and estimates the cost of splitting the sequences in half.
- * If advantageous to split, then we recurse down the two sub-blocks. If not, or if an error occurred in estimation, then
- * we do not recurse.
+ * If advantageous to split, then we recurse down the two sub-blocks.
+ * If not, or if an error occurred in estimation, then we do not recurse.
*
- * Note: The recursion depth is capped by a heuristic minimum number of sequences, defined by MIN_SEQUENCES_BLOCK_SPLITTING.
+ * Note: The recursion depth is capped by a heuristic minimum number of sequences,
+ * defined by MIN_SEQUENCES_BLOCK_SPLITTING.
* In theory, this means the absolute largest recursion depth is 10 == log2(maxNbSeqInBlock/MIN_SEQUENCES_BLOCK_SPLITTING).
* In practice, recursion depth usually doesn't go beyond 4.
*
- * Furthermore, the number of splits is capped by ZSTD_MAX_NB_BLOCK_SPLITS. At ZSTD_MAX_NB_BLOCK_SPLITS == 196 with the current existing blockSize
+ * Furthermore, the number of splits is capped by ZSTD_MAX_NB_BLOCK_SPLITS.
+ * At ZSTD_MAX_NB_BLOCK_SPLITS == 196 with the current blockSize
* maximum of 128 KB, this value is actually impossible to reach.
*/
static void
ZSTD_deriveBlockSplitsHelper(seqStoreSplits* splits, size_t startIdx, size_t endIdx,
- ZSTD_CCtx* zc, const seqStore_t* origSeqStore)
+ ZSTD_CCtx* zc, const SeqStore_t* origSeqStore)
{
- seqStore_t* fullSeqStoreChunk = &zc->blockSplitCtx.fullSeqStoreChunk;
- seqStore_t* firstHalfSeqStore = &zc->blockSplitCtx.firstHalfSeqStore;
- seqStore_t* secondHalfSeqStore = &zc->blockSplitCtx.secondHalfSeqStore;
+ SeqStore_t* const fullSeqStoreChunk = &zc->blockSplitCtx.fullSeqStoreChunk;
+ SeqStore_t* const firstHalfSeqStore = &zc->blockSplitCtx.firstHalfSeqStore;
+ SeqStore_t* const secondHalfSeqStore = &zc->blockSplitCtx.secondHalfSeqStore;
size_t estimatedOriginalSize;
size_t estimatedFirstHalfSize;
size_t estimatedSecondHalfSize;
size_t midIdx = (startIdx + endIdx)/2;
+ DEBUGLOG(5, "ZSTD_deriveBlockSplitsHelper: startIdx=%zu endIdx=%zu", startIdx, endIdx);
+ assert(endIdx >= startIdx);
if (endIdx - startIdx < MIN_SEQUENCES_BLOCK_SPLITTING || splits->idx >= ZSTD_MAX_NB_BLOCK_SPLITS) {
- DEBUGLOG(6, "ZSTD_deriveBlockSplitsHelper: Too few sequences");
+ DEBUGLOG(6, "ZSTD_deriveBlockSplitsHelper: Too few sequences (%zu)", endIdx - startIdx);
return;
}
- DEBUGLOG(4, "ZSTD_deriveBlockSplitsHelper: startIdx=%zu endIdx=%zu", startIdx, endIdx);
ZSTD_deriveSeqStoreChunk(fullSeqStoreChunk, origSeqStore, startIdx, endIdx);
ZSTD_deriveSeqStoreChunk(firstHalfSeqStore, origSeqStore, startIdx, midIdx);
ZSTD_deriveSeqStoreChunk(secondHalfSeqStore, origSeqStore, midIdx, endIdx);
estimatedOriginalSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(fullSeqStoreChunk, zc);
estimatedFirstHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(firstHalfSeqStore, zc);
estimatedSecondHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(secondHalfSeqStore, zc);
- DEBUGLOG(4, "Estimated original block size: %zu -- First half split: %zu -- Second half split: %zu",
+ DEBUGLOG(5, "Estimated original block size: %zu -- First half split: %zu -- Second half split: %zu",
estimatedOriginalSize, estimatedFirstHalfSize, estimatedSecondHalfSize);
if (ZSTD_isError(estimatedOriginalSize) || ZSTD_isError(estimatedFirstHalfSize) || ZSTD_isError(estimatedSecondHalfSize)) {
return;
}
if (estimatedFirstHalfSize + estimatedSecondHalfSize < estimatedOriginalSize) {
+ DEBUGLOG(5, "split decided at seqNb:%zu", midIdx);
ZSTD_deriveBlockSplitsHelper(splits, startIdx, midIdx, zc, origSeqStore);
splits->splitLocations[splits->idx] = (U32)midIdx;
splits->idx++;
@@ -3527,14 +4145,18 @@ ZSTD_deriveBlockSplitsHelper(seqStoreSplits* splits, size_t startIdx, size_t end
}
}
-/* Base recursive function. Populates a table with intra-block partition indices that can improve compression ratio.
+/* Base recursive function.
+ * Populates a table with intra-block partition indices that can improve compression ratio.
*
- * Returns the number of splits made (which equals the size of the partition table - 1).
+ * @return: number of splits made (which equals the size of the partition table - 1).
*/
-static size_t ZSTD_deriveBlockSplits(ZSTD_CCtx* zc, U32 partitions[], U32 nbSeq) {
- seqStoreSplits splits = {partitions, 0};
+static size_t ZSTD_deriveBlockSplits(ZSTD_CCtx* zc, U32 partitions[], U32 nbSeq)
+{
+ seqStoreSplits splits;
+ splits.splitLocations = partitions;
+ splits.idx = 0;
if (nbSeq <= 4) {
- DEBUGLOG(4, "ZSTD_deriveBlockSplits: Too few sequences to split");
+ DEBUGLOG(5, "ZSTD_deriveBlockSplits: Too few sequences to split (%u <= 4)", nbSeq);
/* Refuse to try and split anything with less than 4 sequences */
return 0;
}
@@ -3550,18 +4172,20 @@ static size_t ZSTD_deriveBlockSplits(ZSTD_CCtx* zc, U32 partitions[], U32 nbSeq)
* Returns combined size of all blocks (which includes headers), or a ZSTD error code.
*/
static size_t
-ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapacity,
- const void* src, size_t blockSize, U32 lastBlock, U32 nbSeq)
+ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t blockSize,
+ U32 lastBlock, U32 nbSeq)
{
size_t cSize = 0;
const BYTE* ip = (const BYTE*)src;
BYTE* op = (BYTE*)dst;
size_t i = 0;
size_t srcBytesTotal = 0;
- U32* partitions = zc->blockSplitCtx.partitions; /* size == ZSTD_MAX_NB_BLOCK_SPLITS */
- seqStore_t* nextSeqStore = &zc->blockSplitCtx.nextSeqStore;
- seqStore_t* currSeqStore = &zc->blockSplitCtx.currSeqStore;
- size_t numSplits = ZSTD_deriveBlockSplits(zc, partitions, nbSeq);
+ U32* const partitions = zc->blockSplitCtx.partitions; /* size == ZSTD_MAX_NB_BLOCK_SPLITS */
+ SeqStore_t* const nextSeqStore = &zc->blockSplitCtx.nextSeqStore;
+ SeqStore_t* const currSeqStore = &zc->blockSplitCtx.currSeqStore;
+ size_t const numSplits = ZSTD_deriveBlockSplits(zc, partitions, nbSeq);
/* If a block is split and some partitions are emitted as RLE/uncompressed, then repcode history
* may become invalid. In order to reconcile potentially invalid repcodes, we keep track of two
@@ -3577,36 +4201,37 @@ ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapac
*
* See ZSTD_seqStore_resolveOffCodes() for more details.
*/
- repcodes_t dRep;
- repcodes_t cRep;
- ZSTD_memcpy(dRep.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t));
- ZSTD_memcpy(cRep.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t));
- ZSTD_memset(nextSeqStore, 0, sizeof(seqStore_t));
+ Repcodes_t dRep;
+ Repcodes_t cRep;
+ ZSTD_memcpy(dRep.rep, zc->blockState.prevCBlock->rep, sizeof(Repcodes_t));
+ ZSTD_memcpy(cRep.rep, zc->blockState.prevCBlock->rep, sizeof(Repcodes_t));
+ ZSTD_memset(nextSeqStore, 0, sizeof(SeqStore_t));
- DEBUGLOG(4, "ZSTD_compressBlock_splitBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)",
+ DEBUGLOG(5, "ZSTD_compressBlock_splitBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)",
(unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit,
(unsigned)zc->blockState.matchState.nextToUpdate);
if (numSplits == 0) {
- size_t cSizeSingleBlock = ZSTD_compressSeqStore_singleBlock(zc, &zc->seqStore,
- &dRep, &cRep,
- op, dstCapacity,
- ip, blockSize,
- lastBlock, 0 /* isPartition */);
+ size_t cSizeSingleBlock =
+ ZSTD_compressSeqStore_singleBlock(zc, &zc->seqStore,
+ &dRep, &cRep,
+ op, dstCapacity,
+ ip, blockSize,
+ lastBlock, 0 /* isPartition */);
FORWARD_IF_ERROR(cSizeSingleBlock, "Compressing single block from splitBlock_internal() failed!");
DEBUGLOG(5, "ZSTD_compressBlock_splitBlock_internal: No splits");
- assert(cSizeSingleBlock <= ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize);
+ assert(zc->blockSizeMax <= ZSTD_BLOCKSIZE_MAX);
+ assert(cSizeSingleBlock <= zc->blockSizeMax + ZSTD_blockHeaderSize);
return cSizeSingleBlock;
}
ZSTD_deriveSeqStoreChunk(currSeqStore, &zc->seqStore, 0, partitions[0]);
for (i = 0; i <= numSplits; ++i) {
- size_t srcBytes;
size_t cSizeChunk;
U32 const lastPartition = (i == numSplits);
U32 lastBlockEntireSrc = 0;
- srcBytes = ZSTD_countSeqStoreLiteralsBytes(currSeqStore) + ZSTD_countSeqStoreMatchBytes(currSeqStore);
+ size_t srcBytes = ZSTD_countSeqStoreLiteralsBytes(currSeqStore) + ZSTD_countSeqStoreMatchBytes(currSeqStore);
srcBytesTotal += srcBytes;
if (lastPartition) {
/* This is the final partition, need to account for possible last literals */
@@ -3621,7 +4246,8 @@ ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapac
op, dstCapacity,
ip, srcBytes,
lastBlockEntireSrc, 1 /* isPartition */);
- DEBUGLOG(5, "Estimated size: %zu actual size: %zu", ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(currSeqStore, zc), cSizeChunk);
+ DEBUGLOG(5, "Estimated size: %zu vs %zu : actual size",
+ ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(currSeqStore, zc), cSizeChunk);
FORWARD_IF_ERROR(cSizeChunk, "Compressing chunk failed!");
ip += srcBytes;
@@ -3629,12 +4255,12 @@ ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapac
dstCapacity -= cSizeChunk;
cSize += cSizeChunk;
*currSeqStore = *nextSeqStore;
- assert(cSizeChunk <= ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize);
+ assert(cSizeChunk <= zc->blockSizeMax + ZSTD_blockHeaderSize);
}
- /* cRep and dRep may have diverged during the compression. If so, we use the dRep repcodes
- * for the next block.
+ /* cRep and dRep may have diverged during the compression.
+ * If so, we use the dRep repcodes for the next block.
*/
- ZSTD_memcpy(zc->blockState.prevCBlock->rep, dRep.rep, sizeof(repcodes_t));
+ ZSTD_memcpy(zc->blockState.prevCBlock->rep, dRep.rep, sizeof(Repcodes_t));
return cSize;
}
@@ -3643,21 +4269,20 @@ ZSTD_compressBlock_splitBlock(ZSTD_CCtx* zc,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize, U32 lastBlock)
{
- const BYTE* ip = (const BYTE*)src;
- BYTE* op = (BYTE*)dst;
U32 nbSeq;
size_t cSize;
- DEBUGLOG(4, "ZSTD_compressBlock_splitBlock");
- assert(zc->appliedParams.useBlockSplitter == ZSTD_ps_enable);
+ DEBUGLOG(5, "ZSTD_compressBlock_splitBlock");
+ assert(zc->appliedParams.postBlockSplitter == ZSTD_ps_enable);
{ const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");
if (bss == ZSTDbss_noCompress) {
if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
- cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, srcSize, lastBlock);
+ RETURN_ERROR_IF(zc->seqCollector.collectSequences, sequenceProducer_failed, "Uncompressible block");
+ cSize = ZSTD_noCompressBlock(dst, dstCapacity, src, srcSize, lastBlock);
FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
- DEBUGLOG(4, "ZSTD_compressBlock_splitBlock: Nocompress block");
+ DEBUGLOG(5, "ZSTD_compressBlock_splitBlock: Nocompress block");
return cSize;
}
nbSeq = (U32)(zc->seqStore.sequences - zc->seqStore.sequencesStart);
@@ -3673,9 +4298,9 @@ ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize, U32 frame)
{
- /* This the upper bound for the length of an rle block.
- * This isn't the actual upper bound. Finding the real threshold
- * needs further investigation.
+ /* This is an estimated upper bound for the length of an rle block.
+ * This isn't the actual upper bound.
+ * Finding the real threshold needs further investigation.
*/
const U32 rleMaxLength = 25;
size_t cSize;
@@ -3687,11 +4312,15 @@ ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
{ const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");
- if (bss == ZSTDbss_noCompress) { cSize = 0; goto out; }
+ if (bss == ZSTDbss_noCompress) {
+ RETURN_ERROR_IF(zc->seqCollector.collectSequences, sequenceProducer_failed, "Uncompressible block");
+ cSize = 0;
+ goto out;
+ }
}
if (zc->seqCollector.collectSequences) {
- ZSTD_copyBlockSequences(zc);
+ FORWARD_IF_ERROR(ZSTD_copyBlockSequences(&zc->seqCollector, ZSTD_getSeqStore(zc), zc->blockState.prevCBlock->rep), "copyBlockSequences failed");
ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
return 0;
}
@@ -3702,7 +4331,7 @@ ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
&zc->appliedParams,
dst, dstCapacity,
srcSize,
- zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
+ zc->tmpWorkspace, zc->tmpWkspSize /* statically allocated in resetCCtx */,
zc->bmi2);
if (frame &&
@@ -3767,10 +4396,11 @@ static size_t ZSTD_compressBlock_targetCBlockSize_body(ZSTD_CCtx* zc,
* * cSize >= blockBound(srcSize): We have expanded the block too much so
* emit an uncompressed block.
*/
- {
- size_t const cSize = ZSTD_compressSuperBlock(zc, dst, dstCapacity, src, srcSize, lastBlock);
+ { size_t const cSize =
+ ZSTD_compressSuperBlock(zc, dst, dstCapacity, src, srcSize, lastBlock);
if (cSize != ERROR(dstSize_tooSmall)) {
- size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, zc->appliedParams.cParams.strategy);
+ size_t const maxCSize =
+ srcSize - ZSTD_minGain(srcSize, zc->appliedParams.cParams.strategy);
FORWARD_IF_ERROR(cSize, "ZSTD_compressSuperBlock failed");
if (cSize != 0 && cSize < maxCSize + ZSTD_blockHeaderSize) {
ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
@@ -3778,7 +4408,7 @@ static size_t ZSTD_compressBlock_targetCBlockSize_body(ZSTD_CCtx* zc,
}
}
}
- }
+ } /* if (bss == ZSTDbss_compress)*/
DEBUGLOG(6, "Resorting to ZSTD_noCompressBlock()");
/* Superblock compression failed, attempt to emit a single no compress block.
@@ -3807,7 +4437,7 @@ static size_t ZSTD_compressBlock_targetCBlockSize(ZSTD_CCtx* zc,
return cSize;
}
-static void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms,
+static void ZSTD_overflowCorrectIfNeeded(ZSTD_MatchState_t* ms,
ZSTD_cwksp* ws,
ZSTD_CCtx_params const* params,
void const* ip,
@@ -3831,39 +4461,82 @@ static void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms,
}
}
+#include "zstd_preSplit.h"
+
+static size_t ZSTD_optimalBlockSize(ZSTD_CCtx* cctx, const void* src, size_t srcSize, size_t blockSizeMax, int splitLevel, ZSTD_strategy strat, S64 savings)
+{
+ /* split level based on compression strategy, from `fast` to `btultra2` */
+ static const int splitLevels[] = { 0, 0, 1, 2, 2, 3, 3, 4, 4, 4 };
+ /* note: conservatively only split full blocks (128 KB) currently.
+ * While it's possible to go lower, let's keep it simple for a first implementation.
+ * Besides, benefits of splitting are reduced when blocks are already small.
+ */
+ if (srcSize < 128 KB || blockSizeMax < 128 KB)
+ return MIN(srcSize, blockSizeMax);
+ /* do not split incompressible data though:
+ * require verified savings to allow pre-splitting.
+ * Note: as a consequence, the first full block is not split.
+ */
+ if (savings < 3) {
+ DEBUGLOG(6, "don't attempt splitting: savings (%i) too low", (int)savings);
+ return 128 KB;
+ }
+ /* apply @splitLevel, or use the default value (which depends on @strat).
+ * note that the splitting heuristic is still conditioned on @savings >= 3,
+ * so the first block will not reach this code path */
+ if (splitLevel == 1) return 128 KB;
+ if (splitLevel == 0) {
+ assert(ZSTD_fast <= strat && strat <= ZSTD_btultra2);
+ splitLevel = splitLevels[strat];
+ } else {
+ assert(2 <= splitLevel && splitLevel <= 6);
+ splitLevel -= 2;
+ }
+ return ZSTD_splitBlock(src, blockSizeMax, splitLevel, cctx->tmpWorkspace, cctx->tmpWkspSize);
+}
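A brief trace of the decision above (strategy ordering as in the splitLevels comment; enum values are not restated here):

/* srcSize or blockSizeMax < 128 KB   -> MIN(srcSize, blockSizeMax), no pre-split
 * savings < 3 (e.g. the first block) -> full 128 KB block, no pre-split
 * splitLevel == 1                    -> pre-splitting explicitly disabled
 * splitLevel == 0                    -> level taken from splitLevels[strat]
 * splitLevel in 2..6                 -> ZSTD_splitBlock() level = splitLevel - 2
 */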
+
/*! ZSTD_compress_frameChunk() :
* Compress a chunk of data into one or multiple blocks.
* All blocks will be terminated, all input will be consumed.
* Function will issue an error if there is not enough `dstCapacity` to hold the compressed content.
* Frame is supposed already started (header already produced)
-* @return : compressed size, or an error code
+* @return : compressed size, or an error code
*/
static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
U32 lastFrameChunk)
{
- size_t blockSize = cctx->blockSize;
+ size_t blockSizeMax = cctx->blockSizeMax;
size_t remaining = srcSize;
const BYTE* ip = (const BYTE*)src;
BYTE* const ostart = (BYTE*)dst;
BYTE* op = ostart;
U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog;
+ S64 savings = (S64)cctx->consumedSrcSize - (S64)cctx->producedCSize;
assert(cctx->appliedParams.cParams.windowLog <= ZSTD_WINDOWLOG_MAX);
- DEBUGLOG(4, "ZSTD_compress_frameChunk (blockSize=%u)", (unsigned)blockSize);
+ DEBUGLOG(5, "ZSTD_compress_frameChunk (srcSize=%u, blockSizeMax=%u)", (unsigned)srcSize, (unsigned)blockSizeMax);
if (cctx->appliedParams.fParams.checksumFlag && srcSize)
xxh64_update(&cctx->xxhState, src, srcSize);
while (remaining) {
- ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
- U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);
-
- RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE,
+ ZSTD_MatchState_t* const ms = &cctx->blockState.matchState;
+ size_t const blockSize = ZSTD_optimalBlockSize(cctx,
+ ip, remaining,
+ blockSizeMax,
+ cctx->appliedParams.preBlockSplitter_level,
+ cctx->appliedParams.cParams.strategy,
+ savings);
+ U32 const lastBlock = lastFrameChunk & (blockSize == remaining);
+ assert(blockSize <= remaining);
+
+ /* TODO: See 3090. We reduced MIN_CBLOCK_SIZE from 3 to 2, so to compensate we add an
+ * additional 1. We need to revisit this logic and make it more consistent. */
+ RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE + 1,
dstSize_tooSmall,
"not enough space to store compressed block");
- if (remaining < blockSize) blockSize = remaining;
ZSTD_overflowCorrectIfNeeded(
ms, &cctx->workspace, &cctx->appliedParams, ip, ip + blockSize);
@@ -3899,8 +4572,23 @@ static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx,
MEM_writeLE24(op, cBlockHeader);
cSize += ZSTD_blockHeaderSize;
}
- }
-
+ } /* if (ZSTD_useTargetCBlockSize(&cctx->appliedParams))*/
+
+ /* @savings is employed to ensure that splitting doesn't worsen expansion of incompressible data.
+ * Without splitting, the maximum expansion is 3 bytes per full block.
+ * An adversarial input could attempt to fudge the split detector,
+ * and make it split incompressible data, resulting in more block headers.
+ * Note that, since ZSTD_COMPRESSBOUND() assumes a worst case scenario of 1KB per block,
+ * and the splitter never creates blocks that small (current lower limit is 8 KB),
+ * there is already no risk to expand beyond ZSTD_COMPRESSBOUND() limit.
+ * But if the goal is to not expand by more than 3-bytes per 128 KB full block,
+ * then yes, it becomes possible to make the block splitter oversplit incompressible data.
+ * Using @savings, we enforce an even more conservative condition,
+ * requiring the presence of enough savings (at least 3 bytes) to authorize splitting,
+ * otherwise only full blocks are used.
+ * But being conservative is fine,
+ * since splitting barely compressible blocks is not fruitful anyway */
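+ /* e.g. a 128 KB block compressed into 100 KB adds 28 KB of savings,
+ * whereas an incompressible block stored raw only costs its 3-byte block header,
+ * so @savings stays low and the splitting path above remains disabled. */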
+ savings += (S64)blockSize - (S64)cSize;
ip += blockSize;
assert(remaining >= blockSize);
@@ -3919,8 +4607,10 @@ static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx,
static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
- const ZSTD_CCtx_params* params, U64 pledgedSrcSize, U32 dictID)
-{ BYTE* const op = (BYTE*)dst;
+ const ZSTD_CCtx_params* params,
+ U64 pledgedSrcSize, U32 dictID)
+{
+ BYTE* const op = (BYTE*)dst;
U32 const dictIDSizeCodeLength = (dictID>0) + (dictID>=256) + (dictID>=65536); /* 0-3 */
U32 const dictIDSizeCode = params->fParams.noDictIDFlag ? 0 : dictIDSizeCodeLength; /* 0-3 */
U32 const checksumFlag = params->fParams.checksumFlag>0;
@@ -4001,19 +4691,15 @@ size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity)
}
}
-size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq)
+void ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq)
{
- RETURN_ERROR_IF(cctx->stage != ZSTDcs_init, stage_wrong,
- "wrong cctx stage");
- RETURN_ERROR_IF(cctx->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable,
- parameter_unsupported,
- "incompatible with ldm");
+ assert(cctx->stage == ZSTDcs_init);
+ assert(nbSeq == 0 || cctx->appliedParams.ldmParams.enableLdm != ZSTD_ps_enable);
cctx->externSeqStore.seq = seq;
cctx->externSeqStore.size = nbSeq;
cctx->externSeqStore.capacity = nbSeq;
cctx->externSeqStore.pos = 0;
cctx->externSeqStore.posInSequence = 0;
- return 0;
}
@@ -4022,7 +4708,7 @@ static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
const void* src, size_t srcSize,
U32 frame, U32 lastFrameChunk)
{
- ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
+ ZSTD_MatchState_t* const ms = &cctx->blockState.matchState;
size_t fhSize = 0;
DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u",
@@ -4057,7 +4743,7 @@ static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
src, (BYTE const*)src + srcSize);
}
- DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (unsigned)cctx->blockSize);
+ DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (unsigned)cctx->blockSizeMax);
{ size_t const cSize = frame ?
ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :
ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize, 0 /* frame */);
@@ -4078,58 +4764,90 @@ static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
}
}
-size_t ZSTD_compressContinue (ZSTD_CCtx* cctx,
- void* dst, size_t dstCapacity,
- const void* src, size_t srcSize)
+size_t ZSTD_compressContinue_public(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
{
DEBUGLOG(5, "ZSTD_compressContinue (srcSize=%u)", (unsigned)srcSize);
return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 0 /* last chunk */);
}
+/* NOTE: Must just wrap ZSTD_compressContinue_public() */
+size_t ZSTD_compressContinue(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
+{
+ return ZSTD_compressContinue_public(cctx, dst, dstCapacity, src, srcSize);
+}
-size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx)
+static size_t ZSTD_getBlockSize_deprecated(const ZSTD_CCtx* cctx)
{
ZSTD_compressionParameters const cParams = cctx->appliedParams.cParams;
assert(!ZSTD_checkCParams(cParams));
- return MIN (ZSTD_BLOCKSIZE_MAX, (U32)1 << cParams.windowLog);
+ return MIN(cctx->appliedParams.maxBlockSize, (size_t)1 << cParams.windowLog);
}
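+/* e.g. with the default 128 KB maxBlockSize and windowLog=10, the effective block size
+ * drops to 1 KB, since a block can never exceed the window (parameters illustrative only). */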
-size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+/* NOTE: Must just wrap ZSTD_getBlockSize_deprecated() */
+size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx)
+{
+ return ZSTD_getBlockSize_deprecated(cctx);
+}
+
+/* NOTE: Must just wrap ZSTD_compressBlock_deprecated() */
+size_t ZSTD_compressBlock_deprecated(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
DEBUGLOG(5, "ZSTD_compressBlock: srcSize = %u", (unsigned)srcSize);
- { size_t const blockSizeMax = ZSTD_getBlockSize(cctx);
+ { size_t const blockSizeMax = ZSTD_getBlockSize_deprecated(cctx);
RETURN_ERROR_IF(srcSize > blockSizeMax, srcSize_wrong, "input is larger than a block"); }
return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */);
}
+/* NOTE: Must just wrap ZSTD_compressBlock_deprecated() */
+size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_deprecated(cctx, dst, dstCapacity, src, srcSize);
+}
+
/*! ZSTD_loadDictionaryContent() :
* @return : 0, or an error code
*/
-static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
- ldmState_t* ls,
- ZSTD_cwksp* ws,
- ZSTD_CCtx_params const* params,
- const void* src, size_t srcSize,
- ZSTD_dictTableLoadMethod_e dtlm)
+static size_t
+ZSTD_loadDictionaryContent(ZSTD_MatchState_t* ms,
+ ldmState_t* ls,
+ ZSTD_cwksp* ws,
+ ZSTD_CCtx_params const* params,
+ const void* src, size_t srcSize,
+ ZSTD_dictTableLoadMethod_e dtlm,
+ ZSTD_tableFillPurpose_e tfp)
{
const BYTE* ip = (const BYTE*) src;
const BYTE* const iend = ip + srcSize;
int const loadLdmDict = params->ldmParams.enableLdm == ZSTD_ps_enable && ls != NULL;
- /* Assert that we the ms params match the params we're being given */
+ /* Assert that the ms params match the params we're being given */
ZSTD_assertEqualCParams(params->cParams, ms->cParams);
- if (srcSize > ZSTD_CHUNKSIZE_MAX) {
+ { /* Ensure large dictionaries can't cause index overflow */
+
/* Allow the dictionary to set indices up to exactly ZSTD_CURRENT_MAX.
* Dictionaries right at the edge will immediately trigger overflow
* correction, but I don't want to insert extra constraints here.
*/
- U32 const maxDictSize = ZSTD_CURRENT_MAX - 1;
- /* We must have cleared our windows when our source is this large. */
- assert(ZSTD_window_isEmpty(ms->window));
- if (loadLdmDict)
- assert(ZSTD_window_isEmpty(ls->window));
+ U32 maxDictSize = ZSTD_CURRENT_MAX - ZSTD_WINDOW_START_INDEX;
+
+ int const CDictTaggedIndices = ZSTD_CDictIndicesAreTagged(&params->cParams);
+ if (CDictTaggedIndices && tfp == ZSTD_tfp_forCDict) {
+ /* Some dictionary matchfinders in zstd use "short cache",
+ * which treats the lower ZSTD_SHORT_CACHE_TAG_BITS of each
+ * CDict hashtable entry as a tag rather than as part of an index.
+ * When short cache is used, we need to truncate the dictionary
+ * so that its indices don't overlap with the tag. */
+ U32 const shortCacheMaxDictSize = (1u << (32 - ZSTD_SHORT_CACHE_TAG_BITS)) - ZSTD_WINDOW_START_INDEX;
+ maxDictSize = MIN(maxDictSize, shortCacheMaxDictSize);
+ assert(!loadLdmDict);
+ }
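+ /* For instance, with ZSTD_SHORT_CACHE_TAG_BITS == 8 (assumed here for illustration),
+ * shortCacheMaxDictSize is just under 16 MB, so only the last ~16 MB of a larger
+ * dictionary would be indexed when building a CDict with tagged indices. */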
+
/* If the dictionary is too large, only load the suffix of the dictionary. */
if (srcSize > maxDictSize) {
ip = iend - maxDictSize;
@@ -4138,35 +4856,59 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
}
}
- DEBUGLOG(4, "ZSTD_loadDictionaryContent(): useRowMatchFinder=%d", (int)params->useRowMatchFinder);
+ if (srcSize > ZSTD_CHUNKSIZE_MAX) {
+ /* We must have cleared our windows when our source is this large. */
+ assert(ZSTD_window_isEmpty(ms->window));
+ if (loadLdmDict) assert(ZSTD_window_isEmpty(ls->window));
+ }
ZSTD_window_update(&ms->window, src, srcSize, /* forceNonContiguous */ 0);
- ms->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ms->window.base);
- ms->forceNonContiguous = params->deterministicRefPrefix;
- if (loadLdmDict) {
+ DEBUGLOG(4, "ZSTD_loadDictionaryContent: useRowMatchFinder=%d", (int)params->useRowMatchFinder);
+
+ if (loadLdmDict) { /* Load the entire dict into LDM matchfinders. */
+ DEBUGLOG(4, "ZSTD_loadDictionaryContent: Trigger loadLdmDict");
ZSTD_window_update(&ls->window, src, srcSize, /* forceNonContiguous */ 0);
ls->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ls->window.base);
+ ZSTD_ldm_fillHashTable(ls, ip, iend, &params->ldmParams);
+ DEBUGLOG(4, "ZSTD_loadDictionaryContent: ZSTD_ldm_fillHashTable completes");
+ }
+
+ /* If the dict is larger than we can reasonably index in our tables, only load the suffix. */
+ { U32 maxDictSize = 1U << MIN(MAX(params->cParams.hashLog + 3, params->cParams.chainLog + 1), 31);
+ if (srcSize > maxDictSize) {
+ ip = iend - maxDictSize;
+ src = ip;
+ srcSize = maxDictSize;
+ }
}
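+ /* Example sizing : with hashLog=17 and chainLog=16, maxDictSize = 1 << MAX(20, 17) = 1 MB,
+ * so only the last 1 MB of a 4 MB raw dictionary gets inserted into the match tables
+ * (the cParams values are illustrative, not tied to a particular compression level). */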
+ ms->nextToUpdate = (U32)(ip - ms->window.base);
+ ms->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ms->window.base);
+ ms->forceNonContiguous = params->deterministicRefPrefix;
+
if (srcSize <= HASH_READ_SIZE) return 0;
ZSTD_overflowCorrectIfNeeded(ms, ws, params, ip, iend);
- if (loadLdmDict)
- ZSTD_ldm_fillHashTable(ls, ip, iend, &params->ldmParams);
-
switch(params->cParams.strategy)
{
case ZSTD_fast:
- ZSTD_fillHashTable(ms, iend, dtlm);
+ ZSTD_fillHashTable(ms, iend, dtlm, tfp);
break;
case ZSTD_dfast:
- ZSTD_fillDoubleHashTable(ms, iend, dtlm);
+#ifndef ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR
+ ZSTD_fillDoubleHashTable(ms, iend, dtlm, tfp);
+#else
+ assert(0); /* shouldn't be called: cparams should've been adjusted. */
+#endif
break;
case ZSTD_greedy:
case ZSTD_lazy:
case ZSTD_lazy2:
+#if !defined(ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR) \
+ || !defined(ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR) \
+ || !defined(ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR)
assert(srcSize >= HASH_READ_SIZE);
if (ms->dedicatedDictSearch) {
assert(ms->chainTable != NULL);
@@ -4174,7 +4916,7 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
} else {
assert(params->useRowMatchFinder != ZSTD_ps_auto);
if (params->useRowMatchFinder == ZSTD_ps_enable) {
- size_t const tagTableSize = ((size_t)1 << params->cParams.hashLog) * sizeof(U16);
+ size_t const tagTableSize = ((size_t)1 << params->cParams.hashLog);
ZSTD_memset(ms->tagTable, 0, tagTableSize);
ZSTD_row_update(ms, iend-HASH_READ_SIZE);
DEBUGLOG(4, "Using row-based hash table for lazy dict");
@@ -4183,14 +4925,24 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
DEBUGLOG(4, "Using chain-based hash table for lazy dict");
}
}
+#else
+ assert(0); /* shouldn't be called: cparams should've been adjusted. */
+#endif
break;
case ZSTD_btlazy2: /* we want the dictionary table fully sorted */
case ZSTD_btopt:
case ZSTD_btultra:
case ZSTD_btultra2:
+#if !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR) \
+ || !defined(ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR) \
+ || !defined(ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR)
assert(srcSize >= HASH_READ_SIZE);
+ DEBUGLOG(4, "Fill %u bytes into the Binary Tree", (unsigned)srcSize);
ZSTD_updateTree(ms, iend-HASH_READ_SIZE, iend);
+#else
+ assert(0); /* shouldn't be called: cparams should've been adjusted. */
+#endif
break;
default:
@@ -4233,20 +4985,19 @@ size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
{ unsigned maxSymbolValue = 255;
unsigned hasZeroWeights = 1;
size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.huf.CTable, &maxSymbolValue, dictPtr,
- dictEnd-dictPtr, &hasZeroWeights);
+ (size_t)(dictEnd-dictPtr), &hasZeroWeights);
/* We only set the loaded table as valid if it contains all non-zero
* weights. Otherwise, we set it to check */
- if (!hasZeroWeights)
+ if (!hasZeroWeights && maxSymbolValue == 255)
bs->entropy.huf.repeatMode = HUF_repeat_valid;
RETURN_ERROR_IF(HUF_isError(hufHeaderSize), dictionary_corrupted, "");
- RETURN_ERROR_IF(maxSymbolValue < 255, dictionary_corrupted, "");
dictPtr += hufHeaderSize;
}
{ unsigned offcodeLog;
- size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
+ size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, (size_t)(dictEnd-dictPtr));
RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted, "");
RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted, "");
/* fill all offset symbols to avoid garbage at end of table */
@@ -4261,7 +5012,7 @@ size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
{ short matchlengthNCount[MaxML+1];
unsigned matchlengthMaxValue = MaxML, matchlengthLog;
- size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
+ size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, (size_t)(dictEnd-dictPtr));
RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted, "");
RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted, "");
RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
@@ -4275,7 +5026,7 @@ size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
{ short litlengthNCount[MaxLL+1];
unsigned litlengthMaxValue = MaxLL, litlengthLog;
- size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
+ size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, (size_t)(dictEnd-dictPtr));
RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted, "");
RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted, "");
RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
@@ -4309,7 +5060,7 @@ size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
RETURN_ERROR_IF(bs->rep[u] > dictContentSize, dictionary_corrupted, "");
} } }
- return dictPtr - (const BYTE*)dict;
+ return (size_t)(dictPtr - (const BYTE*)dict);
}
/* Dictionary format :
@@ -4322,11 +5073,12 @@ size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
* dictSize supposed >= 8
*/
static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
- ZSTD_matchState_t* ms,
+ ZSTD_MatchState_t* ms,
ZSTD_cwksp* ws,
ZSTD_CCtx_params const* params,
const void* dict, size_t dictSize,
ZSTD_dictTableLoadMethod_e dtlm,
+ ZSTD_tableFillPurpose_e tfp,
void* workspace)
{
const BYTE* dictPtr = (const BYTE*)dict;
@@ -4345,7 +5097,7 @@ static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
{
size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
FORWARD_IF_ERROR(ZSTD_loadDictionaryContent(
- ms, NULL, ws, params, dictPtr, dictContentSize, dtlm), "");
+ ms, NULL, ws, params, dictPtr, dictContentSize, dtlm, tfp), "");
}
return dictID;
}
@@ -4354,13 +5106,14 @@ static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
* @return : dictID, or an error code */
static size_t
ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
- ZSTD_matchState_t* ms,
+ ZSTD_MatchState_t* ms,
ldmState_t* ls,
ZSTD_cwksp* ws,
const ZSTD_CCtx_params* params,
const void* dict, size_t dictSize,
ZSTD_dictContentType_e dictContentType,
ZSTD_dictTableLoadMethod_e dtlm,
+ ZSTD_tableFillPurpose_e tfp,
void* workspace)
{
DEBUGLOG(4, "ZSTD_compress_insertDictionary (dictSize=%u)", (U32)dictSize);
@@ -4373,13 +5126,13 @@ ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
/* dict restricted modes */
if (dictContentType == ZSTD_dct_rawContent)
- return ZSTD_loadDictionaryContent(ms, ls, ws, params, dict, dictSize, dtlm);
+ return ZSTD_loadDictionaryContent(ms, ls, ws, params, dict, dictSize, dtlm, tfp);
if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) {
if (dictContentType == ZSTD_dct_auto) {
DEBUGLOG(4, "raw content dictionary detected");
return ZSTD_loadDictionaryContent(
- ms, ls, ws, params, dict, dictSize, dtlm);
+ ms, ls, ws, params, dict, dictSize, dtlm, tfp);
}
RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, "");
assert(0); /* impossible */
@@ -4387,13 +5140,14 @@ ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
/* dict as full zstd dictionary */
return ZSTD_loadZstdDictionary(
- bs, ms, ws, params, dict, dictSize, dtlm, workspace);
+ bs, ms, ws, params, dict, dictSize, dtlm, tfp, workspace);
}
#define ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF (128 KB)
#define ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER (6ULL)
/*! ZSTD_compressBegin_internal() :
+ * Assumption : either @dict OR @cdict (or none) is non-NULL, never both
* @return : 0, or an error code */
static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
const void* dict, size_t dictSize,
@@ -4426,11 +5180,11 @@ static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
cctx->blockState.prevCBlock, &cctx->blockState.matchState,
&cctx->ldmState, &cctx->workspace, &cctx->appliedParams, cdict->dictContent,
cdict->dictContentSize, cdict->dictContentType, dtlm,
- cctx->entropyWorkspace)
+ ZSTD_tfp_forCCtx, cctx->tmpWorkspace)
: ZSTD_compress_insertDictionary(
cctx->blockState.prevCBlock, &cctx->blockState.matchState,
&cctx->ldmState, &cctx->workspace, &cctx->appliedParams, dict, dictSize,
- dictContentType, dtlm, cctx->entropyWorkspace);
+ dictContentType, dtlm, ZSTD_tfp_forCCtx, cctx->tmpWorkspace);
FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed");
assert(dictID <= UINT_MAX);
cctx->dictID = (U32)dictID;
@@ -4471,11 +5225,11 @@ size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx,
&cctxParams, pledgedSrcSize);
}
-size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel)
+static size_t
+ZSTD_compressBegin_usingDict_deprecated(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel)
{
ZSTD_CCtx_params cctxParams;
- {
- ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_noAttachDict);
+ { ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_noAttachDict);
ZSTD_CCtxParams_init_internal(&cctxParams, &params, (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel);
}
DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (unsigned)dictSize);
@@ -4483,9 +5237,15 @@ size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t di
&cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered);
}
+size_t
+ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel)
+{
+ return ZSTD_compressBegin_usingDict_deprecated(cctx, dict, dictSize, compressionLevel);
+}
+
size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel)
{
- return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel);
+ return ZSTD_compressBegin_usingDict_deprecated(cctx, NULL, 0, compressionLevel);
}
@@ -4496,14 +5256,13 @@ static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
{
BYTE* const ostart = (BYTE*)dst;
BYTE* op = ostart;
- size_t fhSize = 0;
DEBUGLOG(4, "ZSTD_writeEpilogue");
RETURN_ERROR_IF(cctx->stage == ZSTDcs_created, stage_wrong, "init missing");
/* special case : empty frame */
if (cctx->stage == ZSTDcs_init) {
- fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams, 0, 0);
+ size_t fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams, 0, 0);
FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed");
dstCapacity -= fhSize;
op += fhSize;
@@ -4513,8 +5272,9 @@ static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
if (cctx->stage != ZSTDcs_ending) {
/* write one last empty block, make it the "last" block */
U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0;
- RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for epilogue");
- MEM_writeLE32(op, cBlockHeader24);
+ ZSTD_STATIC_ASSERT(ZSTD_BLOCKHEADERSIZE == 3);
+ RETURN_ERROR_IF(dstCapacity<3, dstSize_tooSmall, "no room for epilogue");
+ MEM_writeLE24(op, cBlockHeader24);
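+ /* this writes the 3 bytes 0x01 0x00 0x00 : lastBlock=1, blockType=raw, blockSize=0 */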
op += ZSTD_blockHeaderSize;
dstCapacity -= ZSTD_blockHeaderSize;
}
@@ -4528,7 +5288,7 @@ static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
}
cctx->stage = ZSTDcs_created; /* return to "created but no init" status */
- return op-ostart;
+ return (size_t)(op-ostart);
}
void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize)
@@ -4537,9 +5297,9 @@ void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize)
(void)extraCSize;
}
-size_t ZSTD_compressEnd (ZSTD_CCtx* cctx,
- void* dst, size_t dstCapacity,
- const void* src, size_t srcSize)
+size_t ZSTD_compressEnd_public(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
{
size_t endResult;
size_t const cSize = ZSTD_compressContinue_internal(cctx,
@@ -4563,6 +5323,14 @@ size_t ZSTD_compressEnd (ZSTD_CCtx* cctx,
return cSize + endResult;
}
+/* NOTE: Must just wrap ZSTD_compressEnd_public() */
+size_t ZSTD_compressEnd(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
+{
+ return ZSTD_compressEnd_public(cctx, dst, dstCapacity, src, srcSize);
+}
+
size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
@@ -4591,7 +5359,7 @@ size_t ZSTD_compress_advanced_internal(
FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
params, srcSize, ZSTDb_not_buffered) , "");
- return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
+ return ZSTD_compressEnd_public(cctx, dst, dstCapacity, src, srcSize);
}
size_t ZSTD_compress_usingDict(ZSTD_CCtx* cctx,
@@ -4709,7 +5477,7 @@ static size_t ZSTD_initCDict_internal(
{ size_t const dictID = ZSTD_compress_insertDictionary(
&cdict->cBlockState, &cdict->matchState, NULL, &cdict->workspace,
&params, cdict->dictContent, cdict->dictContentSize,
- dictContentType, ZSTD_dtlm_full, cdict->entropyWorkspace);
+ dictContentType, ZSTD_dtlm_full, ZSTD_tfp_forCDict, cdict->entropyWorkspace);
FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed");
assert(dictID <= (size_t)(U32)-1);
cdict->dictID = (U32)dictID;
@@ -4719,14 +5487,16 @@ static size_t ZSTD_initCDict_internal(
return 0;
}
-static ZSTD_CDict* ZSTD_createCDict_advanced_internal(size_t dictSize,
- ZSTD_dictLoadMethod_e dictLoadMethod,
- ZSTD_compressionParameters cParams,
- ZSTD_paramSwitch_e useRowMatchFinder,
- U32 enableDedicatedDictSearch,
- ZSTD_customMem customMem)
+static ZSTD_CDict*
+ZSTD_createCDict_advanced_internal(size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_compressionParameters cParams,
+ ZSTD_ParamSwitch_e useRowMatchFinder,
+ int enableDedicatedDictSearch,
+ ZSTD_customMem customMem)
{
if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
+ DEBUGLOG(3, "ZSTD_createCDict_advanced_internal (dictSize=%u)", (unsigned)dictSize);
{ size_t const workspaceSize =
ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) +
@@ -4763,6 +5533,7 @@ ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize,
{
ZSTD_CCtx_params cctxParams;
ZSTD_memset(&cctxParams, 0, sizeof(cctxParams));
+ DEBUGLOG(3, "ZSTD_createCDict_advanced, dictSize=%u, mode=%u", (unsigned)dictSize, (unsigned)dictContentType);
ZSTD_CCtxParams_init(&cctxParams, 0);
cctxParams.cParams = cParams;
cctxParams.customMem = customMem;
@@ -4783,7 +5554,7 @@ ZSTD_CDict* ZSTD_createCDict_advanced2(
ZSTD_compressionParameters cParams;
ZSTD_CDict* cdict;
- DEBUGLOG(3, "ZSTD_createCDict_advanced2, mode %u", (unsigned)dictContentType);
+ DEBUGLOG(3, "ZSTD_createCDict_advanced2, dictSize=%u, mode=%u", (unsigned)dictSize, (unsigned)dictContentType);
if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
if (cctxParams.enableDedicatedDictSearch) {
@@ -4802,7 +5573,7 @@ ZSTD_CDict* ZSTD_createCDict_advanced2(
&cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
}
- DEBUGLOG(3, "ZSTD_createCDict_advanced2: DDS: %u", cctxParams.enableDedicatedDictSearch);
+ DEBUGLOG(3, "ZSTD_createCDict_advanced2: DedicatedDictSearch=%u", cctxParams.enableDedicatedDictSearch);
cctxParams.cParams = cParams;
cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams.useRowMatchFinder, &cParams);
@@ -4810,10 +5581,8 @@ ZSTD_CDict* ZSTD_createCDict_advanced2(
dictLoadMethod, cctxParams.cParams,
cctxParams.useRowMatchFinder, cctxParams.enableDedicatedDictSearch,
customMem);
- if (!cdict)
- return NULL;
- if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
+ if (!cdict || ZSTD_isError( ZSTD_initCDict_internal(cdict,
dict, dictSize,
dictLoadMethod, dictContentType,
cctxParams) )) {
@@ -4867,7 +5636,7 @@ size_t ZSTD_freeCDict(ZSTD_CDict* cdict)
* workspaceSize: Use ZSTD_estimateCDictSize()
* to determine how large workspace must be.
* cParams : use ZSTD_getCParams() to transform a compression level
- * into its relevants cParams.
+ * into its relevant cParams.
* @return : pointer to ZSTD_CDict*, or NULL if error (size too small)
* Note : there is no corresponding "free" function.
* Since workspace was allocated externally, it must be freed externally.
@@ -4879,7 +5648,7 @@ const ZSTD_CDict* ZSTD_initStaticCDict(
ZSTD_dictContentType_e dictContentType,
ZSTD_compressionParameters cParams)
{
- ZSTD_paramSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(ZSTD_ps_auto, &cParams);
+ ZSTD_ParamSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(ZSTD_ps_auto, &cParams);
/* enableDedicatedDictSearch == 1 ensures matchstate is not too small in case this CDict will be used for DDS + row hash */
size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, useRowMatchFinder, /* enableDedicatedDictSearch */ 1, /* forCCtx */ 0);
size_t const neededSize = ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))
@@ -4890,6 +5659,7 @@ const ZSTD_CDict* ZSTD_initStaticCDict(
ZSTD_CDict* cdict;
ZSTD_CCtx_params params;
+ DEBUGLOG(4, "ZSTD_initStaticCDict (dictSize==%u)", (unsigned)dictSize);
if ((size_t)workspace & 7) return NULL; /* 8-aligned */
{
@@ -4900,14 +5670,13 @@ const ZSTD_CDict* ZSTD_initStaticCDict(
ZSTD_cwksp_move(&cdict->workspace, &ws);
}
- DEBUGLOG(4, "(workspaceSize < neededSize) : (%u < %u) => %u",
- (unsigned)workspaceSize, (unsigned)neededSize, (unsigned)(workspaceSize < neededSize));
if (workspaceSize < neededSize) return NULL;
ZSTD_CCtxParams_init(&params, 0);
params.cParams = cParams;
params.useRowMatchFinder = useRowMatchFinder;
cdict->useRowMatchFinder = useRowMatchFinder;
+ cdict->compressionLevel = ZSTD_NO_CLEVEL;
if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
dict, dictSize,
@@ -4987,12 +5756,17 @@ size_t ZSTD_compressBegin_usingCDict_advanced(
/* ZSTD_compressBegin_usingCDict() :
* cdict must be != NULL */
-size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
+size_t ZSTD_compressBegin_usingCDict_deprecated(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
{
ZSTD_frameParameters const fParams = { 0 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
return ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN);
}
+size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
+{
+ return ZSTD_compressBegin_usingCDict_deprecated(cctx, cdict);
+}
+
/*! ZSTD_compress_usingCDict_internal():
* Implementation of various ZSTD_compress_usingCDict* functions.
*/
@@ -5002,7 +5776,7 @@ static size_t ZSTD_compress_usingCDict_internal(ZSTD_CCtx* cctx,
const ZSTD_CDict* cdict, ZSTD_frameParameters fParams)
{
FORWARD_IF_ERROR(ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, srcSize), ""); /* will check if cdict != NULL */
- return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
+ return ZSTD_compressEnd_public(cctx, dst, dstCapacity, src, srcSize);
}
/*! ZSTD_compress_usingCDict_advanced():
@@ -5068,7 +5842,7 @@ size_t ZSTD_CStreamOutSize(void)
return ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */ ;
}
-static ZSTD_cParamMode_e ZSTD_getCParamMode(ZSTD_CDict const* cdict, ZSTD_CCtx_params const* params, U64 pledgedSrcSize)
+static ZSTD_CParamMode_e ZSTD_getCParamMode(ZSTD_CDict const* cdict, ZSTD_CCtx_params const* params, U64 pledgedSrcSize)
{
if (cdict != NULL && ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize))
return ZSTD_cpm_attachDict;
@@ -5199,30 +5973,41 @@ size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel)
static size_t ZSTD_nextInputSizeHint(const ZSTD_CCtx* cctx)
{
- size_t hintInSize = cctx->inBuffTarget - cctx->inBuffPos;
- if (hintInSize==0) hintInSize = cctx->blockSize;
- return hintInSize;
+ if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) {
+ return cctx->blockSizeMax - cctx->stableIn_notConsumed;
+ }
+ assert(cctx->appliedParams.inBufferMode == ZSTD_bm_buffered);
+ { size_t hintInSize = cctx->inBuffTarget - cctx->inBuffPos;
+ if (hintInSize==0) hintInSize = cctx->blockSizeMax;
+ return hintInSize;
+ }
}
/* ZSTD_compressStream_generic():
* internal function for all *compressStream*() variants
- * non-static, because can be called from zstdmt_compress.c
- * @return : hint size for next input */
+ * @return : hint size for next input, to complete the ongoing block */
static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
ZSTD_outBuffer* output,
ZSTD_inBuffer* input,
ZSTD_EndDirective const flushMode)
{
- const char* const istart = (const char*)input->src;
- const char* const iend = input->size != 0 ? istart + input->size : istart;
- const char* ip = input->pos != 0 ? istart + input->pos : istart;
- char* const ostart = (char*)output->dst;
- char* const oend = output->size != 0 ? ostart + output->size : ostart;
- char* op = output->pos != 0 ? ostart + output->pos : ostart;
+ const char* const istart = (assert(input != NULL), (const char*)input->src);
+ const char* const iend = (istart != NULL) ? istart + input->size : istart;
+ const char* ip = (istart != NULL) ? istart + input->pos : istart;
+ char* const ostart = (assert(output != NULL), (char*)output->dst);
+ char* const oend = (ostart != NULL) ? ostart + output->size : ostart;
+ char* op = (ostart != NULL) ? ostart + output->pos : ostart;
U32 someMoreWork = 1;
/* check expectations */
- DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%u", (unsigned)flushMode);
+ DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%i, srcSize = %zu", (int)flushMode, input->size - input->pos);
+ assert(zcs != NULL);
+ if (zcs->appliedParams.inBufferMode == ZSTD_bm_stable) {
+ assert(input->pos >= zcs->stableIn_notConsumed);
+ input->pos -= zcs->stableIn_notConsumed;
+ if (ip) ip -= zcs->stableIn_notConsumed;
+ zcs->stableIn_notConsumed = 0;
+ }
if (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered) {
assert(zcs->inBuff != NULL);
assert(zcs->inBuffSize > 0);
@@ -5231,8 +6016,10 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
assert(zcs->outBuff != NULL);
assert(zcs->outBuffSize > 0);
}
- assert(output->pos <= output->size);
+ if (input->src == NULL) assert(input->size == 0);
assert(input->pos <= input->size);
+ if (output->dst == NULL) assert(output->size == 0);
+ assert(output->pos <= output->size);
assert((U32)flushMode <= (U32)ZSTD_e_end);
while (someMoreWork) {
@@ -5243,12 +6030,13 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
case zcss_load:
if ( (flushMode == ZSTD_e_end)
- && ( (size_t)(oend-op) >= ZSTD_compressBound(iend-ip) /* Enough output space */
+ && ( (size_t)(oend-op) >= ZSTD_compressBound((size_t)(iend-ip)) /* Enough output space */
|| zcs->appliedParams.outBufferMode == ZSTD_bm_stable) /* OR we are allowed to return dstSizeTooSmall */
&& (zcs->inBuffPos == 0) ) {
/* shortcut to compression pass directly into output buffer */
- size_t const cSize = ZSTD_compressEnd(zcs,
- op, oend-op, ip, iend-ip);
+ size_t const cSize = ZSTD_compressEnd_public(zcs,
+ op, (size_t)(oend-op),
+ ip, (size_t)(iend-ip));
DEBUGLOG(4, "ZSTD_compressEnd : cSize=%u", (unsigned)cSize);
FORWARD_IF_ERROR(cSize, "ZSTD_compressEnd failed");
ip = iend;
@@ -5262,10 +6050,9 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos;
size_t const loaded = ZSTD_limitCopy(
zcs->inBuff + zcs->inBuffPos, toLoad,
- ip, iend-ip);
+ ip, (size_t)(iend-ip));
zcs->inBuffPos += loaded;
- if (loaded != 0)
- ip += loaded;
+ if (ip) ip += loaded;
if ( (flushMode == ZSTD_e_continue)
&& (zcs->inBuffPos < zcs->inBuffTarget) ) {
/* not enough input to fill full block : stop here */
@@ -5276,16 +6063,29 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
/* empty */
someMoreWork = 0; break;
}
+ } else {
+ assert(zcs->appliedParams.inBufferMode == ZSTD_bm_stable);
+ if ( (flushMode == ZSTD_e_continue)
+ && ( (size_t)(iend - ip) < zcs->blockSizeMax) ) {
+ /* can't compress a full block : stop here */
+ zcs->stableIn_notConsumed = (size_t)(iend - ip);
+ ip = iend; /* pretend to have consumed input */
+ someMoreWork = 0; break;
+ }
+ if ( (flushMode == ZSTD_e_flush)
+ && (ip == iend) ) {
+ /* empty */
+ someMoreWork = 0; break;
+ }
}
/* compress current block (note : this stage cannot be stopped in the middle) */
DEBUGLOG(5, "stream compression stage (flushMode==%u)", flushMode);
{ int const inputBuffered = (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered);
void* cDst;
size_t cSize;
- size_t oSize = oend-op;
- size_t const iSize = inputBuffered
- ? zcs->inBuffPos - zcs->inToCompress
- : MIN((size_t)(iend - ip), zcs->blockSize);
+ size_t oSize = (size_t)(oend-op);
+ size_t const iSize = inputBuffered ? zcs->inBuffPos - zcs->inToCompress
+ : MIN((size_t)(iend - ip), zcs->blockSizeMax);
if (oSize >= ZSTD_compressBound(iSize) || zcs->appliedParams.outBufferMode == ZSTD_bm_stable)
cDst = op; /* compress into output buffer, to skip flush stage */
else
@@ -5293,34 +6093,31 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
if (inputBuffered) {
unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip==iend);
cSize = lastBlock ?
- ZSTD_compressEnd(zcs, cDst, oSize,
+ ZSTD_compressEnd_public(zcs, cDst, oSize,
zcs->inBuff + zcs->inToCompress, iSize) :
- ZSTD_compressContinue(zcs, cDst, oSize,
+ ZSTD_compressContinue_public(zcs, cDst, oSize,
zcs->inBuff + zcs->inToCompress, iSize);
FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed");
zcs->frameEnded = lastBlock;
/* prepare next block */
- zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;
+ zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSizeMax;
if (zcs->inBuffTarget > zcs->inBuffSize)
- zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize;
+ zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSizeMax;
DEBUGLOG(5, "inBuffTarget:%u / inBuffSize:%u",
(unsigned)zcs->inBuffTarget, (unsigned)zcs->inBuffSize);
if (!lastBlock)
assert(zcs->inBuffTarget <= zcs->inBuffSize);
zcs->inToCompress = zcs->inBuffPos;
- } else {
- unsigned const lastBlock = (ip + iSize == iend);
- assert(flushMode == ZSTD_e_end /* Already validated */);
+ } else { /* !inputBuffered, hence ZSTD_bm_stable */
+ unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip + iSize == iend);
cSize = lastBlock ?
- ZSTD_compressEnd(zcs, cDst, oSize, ip, iSize) :
- ZSTD_compressContinue(zcs, cDst, oSize, ip, iSize);
+ ZSTD_compressEnd_public(zcs, cDst, oSize, ip, iSize) :
+ ZSTD_compressContinue_public(zcs, cDst, oSize, ip, iSize);
/* Consume the input prior to error checking to mirror buffered mode. */
- if (iSize > 0)
- ip += iSize;
+ if (ip) ip += iSize;
FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed");
zcs->frameEnded = lastBlock;
- if (lastBlock)
- assert(ip == iend);
+ if (lastBlock) assert(ip == iend);
}
if (cDst == op) { /* no need to flush */
op += cSize;
@@ -5369,8 +6166,8 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
}
}
- input->pos = ip - istart;
- output->pos = op - ostart;
+ input->pos = (size_t)(ip - istart);
+ output->pos = (size_t)(op - ostart);
if (zcs->frameEnded) return 0;
return ZSTD_nextInputSizeHint(zcs);
}
@@ -5390,8 +6187,10 @@ size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuf
/* After a compression call set the expected input/output buffer.
* This is validated at the start of the next compression call.
*/
-static void ZSTD_setBufferExpectations(ZSTD_CCtx* cctx, ZSTD_outBuffer const* output, ZSTD_inBuffer const* input)
+static void
+ZSTD_setBufferExpectations(ZSTD_CCtx* cctx, const ZSTD_outBuffer* output, const ZSTD_inBuffer* input)
{
+ DEBUGLOG(5, "ZSTD_setBufferExpectations (for advanced stable in/out modes)");
if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) {
cctx->expectedInBuffer = *input;
}
@@ -5410,22 +6209,27 @@ static size_t ZSTD_checkBufferStability(ZSTD_CCtx const* cctx,
{
if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) {
ZSTD_inBuffer const expect = cctx->expectedInBuffer;
- if (expect.src != input->src || expect.pos != input->pos || expect.size != input->size)
- RETURN_ERROR(srcBuffer_wrong, "ZSTD_c_stableInBuffer enabled but input differs!");
- if (endOp != ZSTD_e_end)
- RETURN_ERROR(srcBuffer_wrong, "ZSTD_c_stableInBuffer can only be used with ZSTD_e_end!");
+ if (expect.src != input->src || expect.pos != input->pos)
+ RETURN_ERROR(stabilityCondition_notRespected, "ZSTD_c_stableInBuffer enabled but input differs!");
}
+ (void)endOp;
if (cctx->appliedParams.outBufferMode == ZSTD_bm_stable) {
size_t const outBufferSize = output->size - output->pos;
if (cctx->expectedOutBufferSize != outBufferSize)
- RETURN_ERROR(dstBuffer_wrong, "ZSTD_c_stableOutBuffer enabled but output size differs!");
+ RETURN_ERROR(stabilityCondition_notRespected, "ZSTD_c_stableOutBuffer enabled but output size differs!");
}
return 0;
}
+/*
+ * If @endOp == ZSTD_e_end, @inSize becomes pledgedSrcSize.
+ * Otherwise, it's ignored.
+ * @return: 0 on success, or a ZSTD_error code otherwise.
+ */
static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx,
ZSTD_EndDirective endOp,
- size_t inSize) {
+ size_t inSize)
+{
ZSTD_CCtx_params params = cctx->requestedParams;
ZSTD_prefixDict const prefixDict = cctx->prefixDict;
FORWARD_IF_ERROR( ZSTD_initLocalDict(cctx) , ""); /* Init the local dict if present. */
@@ -5438,21 +6242,24 @@ static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx,
*/
params.compressionLevel = cctx->cdict->compressionLevel;
}
- DEBUGLOG(4, "ZSTD_compressStream2 : transparent init stage");
- if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = inSize + 1; /* auto-fix pledgedSrcSize */
- {
- size_t const dictSize = prefixDict.dict
+ DEBUGLOG(4, "ZSTD_CCtx_init_compressStream2 : transparent init stage");
+ if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = inSize + 1; /* auto-determine pledgedSrcSize */
+
+ { size_t const dictSize = prefixDict.dict
? prefixDict.dictSize
: (cctx->cdict ? cctx->cdict->dictContentSize : 0);
- ZSTD_cParamMode_e const mode = ZSTD_getCParamMode(cctx->cdict, &params, cctx->pledgedSrcSizePlusOne - 1);
+ ZSTD_CParamMode_e const mode = ZSTD_getCParamMode(cctx->cdict, &params, cctx->pledgedSrcSizePlusOne - 1);
params.cParams = ZSTD_getCParamsFromCCtxParams(
&params, cctx->pledgedSrcSizePlusOne-1,
dictSize, mode);
}
- params.useBlockSplitter = ZSTD_resolveBlockSplitterMode(params.useBlockSplitter, &params.cParams);
+ params.postBlockSplitter = ZSTD_resolveBlockSplitterMode(params.postBlockSplitter, &params.cParams);
params.ldmParams.enableLdm = ZSTD_resolveEnableLdm(params.ldmParams.enableLdm, &params.cParams);
params.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params.useRowMatchFinder, &params.cParams);
+ params.validateSequences = ZSTD_resolveExternalSequenceValidation(params.validateSequences);
+ params.maxBlockSize = ZSTD_resolveMaxBlockSize(params.maxBlockSize);
+ params.searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch(params.searchForExternalRepcodes, params.compressionLevel);
{ U64 const pledgedSrcSize = cctx->pledgedSrcSizePlusOne - 1;
assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
@@ -5468,7 +6275,7 @@ static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx,
/* for small input: avoid automatic flush on reaching end of block, since
* it would require to add a 3-bytes null block to end frame
*/
- cctx->inBuffTarget = cctx->blockSize + (cctx->blockSize == pledgedSrcSize);
+ cctx->inBuffTarget = cctx->blockSizeMax + (cctx->blockSizeMax == pledgedSrcSize);
} else {
cctx->inBuffTarget = 0;
}
@@ -5479,6 +6286,8 @@ static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx,
return 0;
}
+/* @return provides a minimum amount of data remaining to be flushed from internal buffers
+ */
size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
ZSTD_outBuffer* output,
ZSTD_inBuffer* input,
@@ -5493,8 +6302,27 @@ size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
/* transparent initialization stage */
if (cctx->streamStage == zcss_init) {
- FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, endOp, input->size), "CompressStream2 initialization failed");
- ZSTD_setBufferExpectations(cctx, output, input); /* Set initial buffer expectations now that we've initialized */
+ size_t const inputSize = input->size - input->pos; /* no obligation to start from pos==0 */
+ size_t const totalInputSize = inputSize + cctx->stableIn_notConsumed;
+ if ( (cctx->requestedParams.inBufferMode == ZSTD_bm_stable) /* input is presumed stable, across invocations */
+ && (endOp == ZSTD_e_continue) /* no flush requested, more input to come */
+ && (totalInputSize < ZSTD_BLOCKSIZE_MAX) ) { /* not even reached one block yet */
+ if (cctx->stableIn_notConsumed) { /* not the first time */
+ /* check stable source guarantees */
+ RETURN_ERROR_IF(input->src != cctx->expectedInBuffer.src, stabilityCondition_notRespected, "stableInBuffer condition not respected: wrong src pointer");
+ RETURN_ERROR_IF(input->pos != cctx->expectedInBuffer.size, stabilityCondition_notRespected, "stableInBuffer condition not respected: externally modified pos");
+ }
+ /* pretend input was consumed, to give a sense of forward progress */
+ input->pos = input->size;
+ /* save the stable inBuffer, for later verification, and for flush/end */
+ cctx->expectedInBuffer = *input;
+ /* the input wasn't actually consumed, so keep track of the position from which compression shall resume */
+ cctx->stableIn_notConsumed += inputSize;
+ /* don't initialize yet : wait for the first full block or a flush() order, for better parameter adaptation */
+ return ZSTD_FRAMEHEADERSIZE_MIN(cctx->requestedParams.format); /* at least some header to produce */
+ }
+ FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, endOp, totalInputSize), "compressStream2 initialization failed");
+ ZSTD_setBufferExpectations(cctx, output, input); /* Set initial buffer expectations now that we've initialized */
}
/* end of transparent initialization stage */
@@ -5512,13 +6340,20 @@ size_t ZSTD_compressStream2_simpleArgs (
const void* src, size_t srcSize, size_t* srcPos,
ZSTD_EndDirective endOp)
{
- ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
- ZSTD_inBuffer input = { src, srcSize, *srcPos };
+ ZSTD_outBuffer output;
+ ZSTD_inBuffer input;
+ output.dst = dst;
+ output.size = dstCapacity;
+ output.pos = *dstPos;
+ input.src = src;
+ input.size = srcSize;
+ input.pos = *srcPos;
/* ZSTD_compressStream2() will check validity of dstPos and srcPos */
- size_t const cErr = ZSTD_compressStream2(cctx, &output, &input, endOp);
- *dstPos = output.pos;
- *srcPos = input.pos;
- return cErr;
+ { size_t const cErr = ZSTD_compressStream2(cctx, &output, &input, endOp);
+ *dstPos = output.pos;
+ *srcPos = input.pos;
+ return cErr;
+ }
}
size_t ZSTD_compress2(ZSTD_CCtx* cctx,
@@ -5541,6 +6376,7 @@ size_t ZSTD_compress2(ZSTD_CCtx* cctx,
/* Reset to the original values. */
cctx->requestedParams.inBufferMode = originalInBufferMode;
cctx->requestedParams.outBufferMode = originalOutBufferMode;
+
FORWARD_IF_ERROR(result, "ZSTD_compressStream2_simpleArgs failed");
if (result != 0) { /* compression not completed, due to lack of output space */
assert(oPos == dstCapacity);
@@ -5551,64 +6387,67 @@ size_t ZSTD_compress2(ZSTD_CCtx* cctx,
}
}
-typedef struct {
- U32 idx; /* Index in array of ZSTD_Sequence */
- U32 posInSequence; /* Position within sequence at idx */
- size_t posInSrc; /* Number of bytes given by sequences provided so far */
-} ZSTD_sequencePosition;
-
/* ZSTD_validateSequence() :
- * @offCode : is presumed to follow format required by ZSTD_storeSeq()
+ * @offBase : must use the format required by ZSTD_storeSeq()
* @returns a ZSTD error code if sequence is not valid
*/
static size_t
-ZSTD_validateSequence(U32 offCode, U32 matchLength,
- size_t posInSrc, U32 windowLog, size_t dictSize)
+ZSTD_validateSequence(U32 offBase, U32 matchLength, U32 minMatch,
+ size_t posInSrc, U32 windowLog, size_t dictSize, int useSequenceProducer)
{
- U32 const windowSize = 1 << windowLog;
+ U32 const windowSize = 1u << windowLog;
/* posInSrc represents the amount of data the decoder would decode up to this point.
* As long as the amount of data decoded is less than or equal to window size, offsets may be
* larger than the total length of output decoded in order to reference the dict, even larger than
* window size. After output surpasses windowSize, we're limited to windowSize offsets again.
*/
size_t const offsetBound = posInSrc > windowSize ? (size_t)windowSize : posInSrc + (size_t)dictSize;
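+    /* e.g. with windowLog=24 (16 MB window), after 1 MB decoded with a 100 KB dictionary,
+     * offsets up to 1 MB + 100 KB are acceptable; once more than 16 MB has been decoded,
+     * offsets are capped at the 16 MB window size (figures chosen for illustration only). */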
- RETURN_ERROR_IF(offCode > STORE_OFFSET(offsetBound), corruption_detected, "Offset too large!");
- RETURN_ERROR_IF(matchLength < MINMATCH, corruption_detected, "Matchlength too small");
+ size_t const matchLenLowerBound = (minMatch == 3 || useSequenceProducer) ? 3 : 4;
+ RETURN_ERROR_IF(offBase > OFFSET_TO_OFFBASE(offsetBound), externalSequences_invalid, "Offset too large!");
+ /* Validate maxNbSeq is large enough for the given matchLength and minMatch */
+ RETURN_ERROR_IF(matchLength < matchLenLowerBound, externalSequences_invalid, "Matchlength too small for the minMatch");
return 0;
}
/* Returns an offset code, given a sequence's raw offset, the ongoing repcode array, and whether litLength == 0 */
-static U32 ZSTD_finalizeOffCode(U32 rawOffset, const U32 rep[ZSTD_REP_NUM], U32 ll0)
+static U32 ZSTD_finalizeOffBase(U32 rawOffset, const U32 rep[ZSTD_REP_NUM], U32 ll0)
{
- U32 offCode = STORE_OFFSET(rawOffset);
+ U32 offBase = OFFSET_TO_OFFBASE(rawOffset);
if (!ll0 && rawOffset == rep[0]) {
- offCode = STORE_REPCODE_1;
+ offBase = REPCODE1_TO_OFFBASE;
} else if (rawOffset == rep[1]) {
- offCode = STORE_REPCODE(2 - ll0);
+ offBase = REPCODE_TO_OFFBASE(2 - ll0);
} else if (rawOffset == rep[2]) {
- offCode = STORE_REPCODE(3 - ll0);
+ offBase = REPCODE_TO_OFFBASE(3 - ll0);
} else if (ll0 && rawOffset == rep[0] - 1) {
- offCode = STORE_REPCODE_3;
+ offBase = REPCODE3_TO_OFFBASE;
}
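+    /* e.g. with rep = {8, 16, 32} : a sequence with litLength > 0 and rawOffset == 16
+     * is emitted as repcode 2, while with litLength == 0 a rawOffset of 7 (rep[0] - 1)
+     * is emitted as repcode 3 (sample values for illustration only). */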
- return offCode;
+ return offBase;
}
-/* Returns 0 on success, and a ZSTD_error otherwise. This function scans through an array of
- * ZSTD_Sequence, storing the sequences it finds, until it reaches a block delimiter.
+/* This function scans through an array of ZSTD_Sequence,
+ * storing the sequences it reads, until it reaches a block delimiter.
+ * Note that the block delimiter includes the last literals of the block.
+ * @blockSize must be == sum(sequence_lengths).
+ * @returns @blockSize on success, and a ZSTD_error otherwise.
*/
static size_t
-ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx,
- ZSTD_sequencePosition* seqPos,
- const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
- const void* src, size_t blockSize)
+ZSTD_transferSequences_wBlockDelim(ZSTD_CCtx* cctx,
+ ZSTD_SequencePosition* seqPos,
+ const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
+ const void* src, size_t blockSize,
+ ZSTD_ParamSwitch_e externalRepSearch)
{
U32 idx = seqPos->idx;
+ U32 const startIdx = idx;
BYTE const* ip = (BYTE const*)(src);
const BYTE* const iend = ip + blockSize;
- repcodes_t updatedRepcodes;
+ Repcodes_t updatedRepcodes;
U32 dictSize;
+ DEBUGLOG(5, "ZSTD_transferSequences_wBlockDelim (blockSize = %zu)", blockSize);
+
if (cctx->cdict) {
dictSize = (U32)cctx->cdict->dictContentSize;
} else if (cctx->prefixDict.dict) {
@@ -5616,27 +6455,60 @@ ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx,
} else {
dictSize = 0;
}
- ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t));
- for (; (inSeqs[idx].matchLength != 0 || inSeqs[idx].offset != 0) && idx < inSeqsSize; ++idx) {
+ ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(Repcodes_t));
+ for (; idx < inSeqsSize && (inSeqs[idx].matchLength != 0 || inSeqs[idx].offset != 0); ++idx) {
U32 const litLength = inSeqs[idx].litLength;
- U32 const ll0 = (litLength == 0);
U32 const matchLength = inSeqs[idx].matchLength;
- U32 const offCode = ZSTD_finalizeOffCode(inSeqs[idx].offset, updatedRepcodes.rep, ll0);
- ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0);
+ U32 offBase;
- DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offCode, matchLength, litLength);
+ if (externalRepSearch == ZSTD_ps_disable) {
+ offBase = OFFSET_TO_OFFBASE(inSeqs[idx].offset);
+ } else {
+ U32 const ll0 = (litLength == 0);
+ offBase = ZSTD_finalizeOffBase(inSeqs[idx].offset, updatedRepcodes.rep, ll0);
+ ZSTD_updateRep(updatedRepcodes.rep, offBase, ll0);
+ }
+
+ DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offBase, matchLength, litLength);
if (cctx->appliedParams.validateSequences) {
seqPos->posInSrc += litLength + matchLength;
- FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, matchLength, seqPos->posInSrc,
- cctx->appliedParams.cParams.windowLog, dictSize),
+ FORWARD_IF_ERROR(ZSTD_validateSequence(offBase, matchLength, cctx->appliedParams.cParams.minMatch,
+ seqPos->posInSrc,
+ cctx->appliedParams.cParams.windowLog, dictSize,
+ ZSTD_hasExtSeqProd(&cctx->appliedParams)),
"Sequence validation failed");
}
- RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation,
+ RETURN_ERROR_IF(idx - seqPos->idx >= cctx->seqStore.maxNbSeq, externalSequences_invalid,
"Not enough memory allocated. Try adjusting ZSTD_c_minMatch.");
- ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength);
+ ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offBase, matchLength);
ip += matchLength + litLength;
}
- ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t));
+ RETURN_ERROR_IF(idx == inSeqsSize, externalSequences_invalid, "Block delimiter not found.");
+
+ /* If we skipped repcode search while parsing, we need to update repcodes now */
+ assert(externalRepSearch != ZSTD_ps_auto);
+ assert(idx >= startIdx);
+ if (externalRepSearch == ZSTD_ps_disable && idx != startIdx) {
+ U32* const rep = updatedRepcodes.rep;
+ U32 lastSeqIdx = idx - 1; /* index of last non-block-delimiter sequence */
+
+ if (lastSeqIdx >= startIdx + 2) {
+ rep[2] = inSeqs[lastSeqIdx - 2].offset;
+ rep[1] = inSeqs[lastSeqIdx - 1].offset;
+ rep[0] = inSeqs[lastSeqIdx].offset;
+ } else if (lastSeqIdx == startIdx + 1) {
+ rep[2] = rep[0];
+ rep[1] = inSeqs[lastSeqIdx - 1].offset;
+ rep[0] = inSeqs[lastSeqIdx].offset;
+ } else {
+ assert(lastSeqIdx == startIdx);
+ rep[2] = rep[1];
+ rep[1] = rep[0];
+ rep[0] = inSeqs[lastSeqIdx].offset;
+ }
+ }
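+ /* e.g. after transferring 5 sequences with repcode search disabled,
+ * rep[0], rep[1], rep[2] end up holding the offsets of the 5th, 4th and 3rd
+ * sequences respectively (most recent offset first). */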
+
+ ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(Repcodes_t));
if (inSeqs[idx].litLength) {
DEBUGLOG(6, "Storing last literals of size: %u", inSeqs[idx].litLength);
@@ -5644,37 +6516,43 @@ ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx,
ip += inSeqs[idx].litLength;
seqPos->posInSrc += inSeqs[idx].litLength;
}
- RETURN_ERROR_IF(ip != iend, corruption_detected, "Blocksize doesn't agree with block delimiter!");
+ RETURN_ERROR_IF(ip != iend, externalSequences_invalid, "Blocksize doesn't agree with block delimiter!");
seqPos->idx = idx+1;
- return 0;
+ return blockSize;
}
-/* Returns the number of bytes to move the current read position back by. Only non-zero
- * if we ended up splitting a sequence. Otherwise, it may return a ZSTD error if something
- * went wrong.
+/*
+ * This function attempts to scan through @blockSize bytes in @src
+ * represented by the sequences in @inSeqs,
+ * storing any (partial) sequences.
*
- * This function will attempt to scan through blockSize bytes represented by the sequences
- * in inSeqs, storing any (partial) sequences.
+ * Occasionally, we may want to reduce the actual number of bytes consumed from @src
+ * to avoid splitting a match, notably if it would produce a match smaller than MINMATCH.
*
- * Occasionally, we may want to change the actual number of bytes we consumed from inSeqs to
- * avoid splitting a match, or to avoid splitting a match such that it would produce a match
- * smaller than MINMATCH. In this case, we return the number of bytes that we didn't read from this block.
+ * @returns the number of bytes consumed from @src, necessarily <= @blockSize.
+ * Otherwise, it may return a ZSTD error if something went wrong.
*/
static size_t
-ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
- const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
- const void* src, size_t blockSize)
+ZSTD_transferSequences_noDelim(ZSTD_CCtx* cctx,
+ ZSTD_SequencePosition* seqPos,
+ const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
+ const void* src, size_t blockSize,
+ ZSTD_ParamSwitch_e externalRepSearch)
{
U32 idx = seqPos->idx;
U32 startPosInSequence = seqPos->posInSequence;
U32 endPosInSequence = seqPos->posInSequence + (U32)blockSize;
size_t dictSize;
- BYTE const* ip = (BYTE const*)(src);
- BYTE const* iend = ip + blockSize; /* May be adjusted if we decide to process fewer than blockSize bytes */
- repcodes_t updatedRepcodes;
+ const BYTE* const istart = (const BYTE*)(src);
+ const BYTE* ip = istart;
+ const BYTE* iend = istart + blockSize; /* May be adjusted if we decide to process fewer than blockSize bytes */
+ Repcodes_t updatedRepcodes;
U32 bytesAdjustment = 0;
U32 finalMatchSplit = 0;
+ /* TODO(embg) support fast parsing mode in noBlockDelim mode */
+ (void)externalRepSearch;
+
if (cctx->cdict) {
dictSize = cctx->cdict->dictContentSize;
} else if (cctx->prefixDict.dict) {
@@ -5682,15 +6560,15 @@ ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition*
} else {
dictSize = 0;
}
- DEBUGLOG(5, "ZSTD_copySequencesToSeqStore: idx: %u PIS: %u blockSize: %zu", idx, startPosInSequence, blockSize);
+ DEBUGLOG(5, "ZSTD_transferSequences_noDelim: idx: %u PIS: %u blockSize: %zu", idx, startPosInSequence, blockSize);
DEBUGLOG(5, "Start seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength);
- ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t));
+ ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(Repcodes_t));
while (endPosInSequence && idx < inSeqsSize && !finalMatchSplit) {
const ZSTD_Sequence currSeq = inSeqs[idx];
U32 litLength = currSeq.litLength;
U32 matchLength = currSeq.matchLength;
U32 const rawOffset = currSeq.offset;
- U32 offCode;
+ U32 offBase;
/* Modify the sequence depending on where endPosInSequence lies */
if (endPosInSequence >= currSeq.litLength + currSeq.matchLength) {
@@ -5704,7 +6582,6 @@ ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition*
/* Move to the next sequence */
endPosInSequence -= currSeq.litLength + currSeq.matchLength;
startPosInSequence = 0;
- idx++;
} else {
/* This is the final (partial) sequence we're adding from inSeqs, and endPosInSequence
does not reach the end of the match. So, we have to split the sequence */
@@ -5744,58 +6621,113 @@ ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition*
}
/* Check if this offset can be represented with a repcode */
{ U32 const ll0 = (litLength == 0);
- offCode = ZSTD_finalizeOffCode(rawOffset, updatedRepcodes.rep, ll0);
- ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0);
+ offBase = ZSTD_finalizeOffBase(rawOffset, updatedRepcodes.rep, ll0);
+ ZSTD_updateRep(updatedRepcodes.rep, offBase, ll0);
}
if (cctx->appliedParams.validateSequences) {
seqPos->posInSrc += litLength + matchLength;
- FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, matchLength, seqPos->posInSrc,
- cctx->appliedParams.cParams.windowLog, dictSize),
+ FORWARD_IF_ERROR(ZSTD_validateSequence(offBase, matchLength, cctx->appliedParams.cParams.minMatch, seqPos->posInSrc,
+ cctx->appliedParams.cParams.windowLog, dictSize, ZSTD_hasExtSeqProd(&cctx->appliedParams)),
"Sequence validation failed");
}
- DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offCode, matchLength, litLength);
- RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation,
+ DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offBase, matchLength, litLength);
+ RETURN_ERROR_IF(idx - seqPos->idx >= cctx->seqStore.maxNbSeq, externalSequences_invalid,
"Not enough memory allocated. Try adjusting ZSTD_c_minMatch.");
- ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength);
+ ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offBase, matchLength);
ip += matchLength + litLength;
+ if (!finalMatchSplit)
+ idx++; /* Next Sequence */
}
DEBUGLOG(5, "Ending seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength);
assert(idx == inSeqsSize || endPosInSequence <= inSeqs[idx].litLength + inSeqs[idx].matchLength);
seqPos->idx = idx;
seqPos->posInSequence = endPosInSequence;
- ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t));
+ ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(Repcodes_t));
iend -= bytesAdjustment;
if (ip != iend) {
/* Store any last literals */
- U32 lastLLSize = (U32)(iend - ip);
+ U32 const lastLLSize = (U32)(iend - ip);
assert(ip <= iend);
DEBUGLOG(6, "Storing last literals of size: %u", lastLLSize);
ZSTD_storeLastLiterals(&cctx->seqStore, ip, lastLLSize);
seqPos->posInSrc += lastLLSize;
}
- return bytesAdjustment;
+ return (size_t)(iend-istart);
}
-typedef size_t (*ZSTD_sequenceCopier) (ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
- const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
- const void* src, size_t blockSize);
-static ZSTD_sequenceCopier ZSTD_selectSequenceCopier(ZSTD_sequenceFormat_e mode)
+/* @seqPos represents a position within @inSeqs;
+ * it is read by this function and updated
+ * once the goal of producing a block of size @blockSize is reached.
+ * @return: nb of bytes consumed from @src, necessarily <= @blockSize.
+ */
+typedef size_t (*ZSTD_SequenceCopier_f)(ZSTD_CCtx* cctx,
+ ZSTD_SequencePosition* seqPos,
+ const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
+ const void* src, size_t blockSize,
+ ZSTD_ParamSwitch_e externalRepSearch);
+
+static ZSTD_SequenceCopier_f ZSTD_selectSequenceCopier(ZSTD_SequenceFormat_e mode)
{
- ZSTD_sequenceCopier sequenceCopier = NULL;
- assert(ZSTD_cParam_withinBounds(ZSTD_c_blockDelimiters, mode));
+ assert(ZSTD_cParam_withinBounds(ZSTD_c_blockDelimiters, (int)mode));
if (mode == ZSTD_sf_explicitBlockDelimiters) {
- return ZSTD_copySequencesToSeqStoreExplicitBlockDelim;
- } else if (mode == ZSTD_sf_noBlockDelimiters) {
- return ZSTD_copySequencesToSeqStoreNoBlockDelim;
+ return ZSTD_transferSequences_wBlockDelim;
+ }
+ assert(mode == ZSTD_sf_noBlockDelimiters);
+ return ZSTD_transferSequences_noDelim;
+}
+
+/* Discover the size of next block by searching for the delimiter.
+ * Note that a block delimiter **must** exist in this mode,
+ * otherwise it's an input error.
+ * The block size retrieved will later be checked to ensure it remains within bounds */
+static size_t
+blockSize_explicitDelimiter(const ZSTD_Sequence* inSeqs, size_t inSeqsSize, ZSTD_SequencePosition seqPos)
+{
+ int end = 0;
+ size_t blockSize = 0;
+ size_t spos = seqPos.idx;
+ DEBUGLOG(6, "blockSize_explicitDelimiter : seq %zu / %zu", spos, inSeqsSize);
+ assert(spos <= inSeqsSize);
+ while (spos < inSeqsSize) {
+ end = (inSeqs[spos].offset == 0);
+ blockSize += inSeqs[spos].litLength + inSeqs[spos].matchLength;
+ if (end) {
+ if (inSeqs[spos].matchLength != 0)
+ RETURN_ERROR(externalSequences_invalid, "delimiter format error : both matchlength and offset must be == 0");
+ break;
+ }
+ spos++;
+ }
+ if (!end)
+ RETURN_ERROR(externalSequences_invalid, "Reached end of sequences without finding a block delimiter");
+ return blockSize;
+}
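For readers less familiar with the explicit-delimiter convention scanned above, here is a small illustration (not part of the patch; all values invented). A delimiter is a sequence with offset == 0 and matchLength == 0, whose litLength carries the block's trailing literals.

/* Illustration only: a block made of 2 real sequences plus a delimiter. */
static const ZSTD_Sequence example_block[3] = {
    { 1024, 10, 18, 0 },   /* offset, litLength, matchLength, rep */
    {  512,  4,  7, 0 },
    {    0,  5,  0, 0 },   /* delimiter: 5 trailing literals, end of block */
};
/* blockSize_explicitDelimiter() sums litLength + matchLength over these
 * entries, including the delimiter: (10+18) + (4+7) + (5+0) = 44 bytes. */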
+
+static size_t determine_blockSize(ZSTD_SequenceFormat_e mode,
+ size_t blockSize, size_t remaining,
+ const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
+ ZSTD_SequencePosition seqPos)
+{
+ DEBUGLOG(6, "determine_blockSize : remainingSize = %zu", remaining);
+ if (mode == ZSTD_sf_noBlockDelimiters) {
+ /* Note: more a "target" block size */
+ return MIN(remaining, blockSize);
+ }
+ assert(mode == ZSTD_sf_explicitBlockDelimiters);
+ { size_t const explicitBlockSize = blockSize_explicitDelimiter(inSeqs, inSeqsSize, seqPos);
+ FORWARD_IF_ERROR(explicitBlockSize, "Error while determining block size with explicit delimiters");
+ if (explicitBlockSize > blockSize)
+ RETURN_ERROR(externalSequences_invalid, "sequences incorrectly define a too large block");
+ if (explicitBlockSize > remaining)
+ RETURN_ERROR(externalSequences_invalid, "sequences define a frame longer than source");
+ return explicitBlockSize;
}
- assert(sequenceCopier != NULL);
- return sequenceCopier;
}
-/* Compress, block-by-block, all of the sequences given.
+/* Compress all provided sequences, block-by-block.
*
* Returns the cumulative size of all compressed blocks (including their headers),
* otherwise a ZSTD error.
@@ -5807,15 +6739,12 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
const void* src, size_t srcSize)
{
size_t cSize = 0;
- U32 lastBlock;
- size_t blockSize;
- size_t compressedSeqsSize;
size_t remaining = srcSize;
- ZSTD_sequencePosition seqPos = {0, 0, 0};
+ ZSTD_SequencePosition seqPos = {0, 0, 0};
- BYTE const* ip = (BYTE const*)src;
+ const BYTE* ip = (BYTE const*)src;
BYTE* op = (BYTE*)dst;
- ZSTD_sequenceCopier const sequenceCopier = ZSTD_selectSequenceCopier(cctx->appliedParams.blockDelimiters);
+ ZSTD_SequenceCopier_f const sequenceCopier = ZSTD_selectSequenceCopier(cctx->appliedParams.blockDelimiters);
DEBUGLOG(4, "ZSTD_compressSequences_internal srcSize: %zu, inSeqsSize: %zu", srcSize, inSeqsSize);
/* Special case: empty frame */
@@ -5829,22 +6758,29 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
}
while (remaining) {
+ size_t compressedSeqsSize;
size_t cBlockSize;
- size_t additionalByteAdjustment;
- lastBlock = remaining <= cctx->blockSize;
- blockSize = lastBlock ? (U32)remaining : (U32)cctx->blockSize;
+ size_t blockSize = determine_blockSize(cctx->appliedParams.blockDelimiters,
+ cctx->blockSizeMax, remaining,
+ inSeqs, inSeqsSize, seqPos);
+ U32 const lastBlock = (blockSize == remaining);
+ FORWARD_IF_ERROR(blockSize, "Error while trying to determine block size");
+ assert(blockSize <= remaining);
ZSTD_resetSeqStore(&cctx->seqStore);
- DEBUGLOG(4, "Working on new block. Blocksize: %zu", blockSize);
- additionalByteAdjustment = sequenceCopier(cctx, &seqPos, inSeqs, inSeqsSize, ip, blockSize);
- FORWARD_IF_ERROR(additionalByteAdjustment, "Bad sequence copy");
- blockSize -= additionalByteAdjustment;
+ blockSize = sequenceCopier(cctx,
+ &seqPos, inSeqs, inSeqsSize,
+ ip, blockSize,
+ cctx->appliedParams.searchForExternalRepcodes);
+ FORWARD_IF_ERROR(blockSize, "Bad sequence copy");
/* If blocks are too small, emit as a nocompress block */
- if (blockSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {
+ /* TODO: See 3090. We reduced MIN_CBLOCK_SIZE from 3 to 2, so to compensate we are adding
+ * an additional 1. We need to revisit this logic and make it more consistent. */
+ if (blockSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1+1) {
cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
FORWARD_IF_ERROR(cBlockSize, "Nocompress block failed");
- DEBUGLOG(4, "Block too small, writing out nocompress block: cSize: %zu", cBlockSize);
+ DEBUGLOG(5, "Block too small (%zu): data remains uncompressed: cSize=%zu", blockSize, cBlockSize);
cSize += cBlockSize;
ip += blockSize;
op += cBlockSize;
@@ -5853,35 +6789,36 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
continue;
}
+ RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall, "not enough dstCapacity to write a new compressed block");
compressedSeqsSize = ZSTD_entropyCompressSeqStore(&cctx->seqStore,
&cctx->blockState.prevCBlock->entropy, &cctx->blockState.nextCBlock->entropy,
&cctx->appliedParams,
op + ZSTD_blockHeaderSize /* Leave space for block header */, dstCapacity - ZSTD_blockHeaderSize,
blockSize,
- cctx->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
+ cctx->tmpWorkspace, cctx->tmpWkspSize /* statically allocated in resetCCtx */,
cctx->bmi2);
FORWARD_IF_ERROR(compressedSeqsSize, "Compressing sequences of block failed");
- DEBUGLOG(4, "Compressed sequences size: %zu", compressedSeqsSize);
+ DEBUGLOG(5, "Compressed sequences size: %zu", compressedSeqsSize);
if (!cctx->isFirstBlock &&
ZSTD_maybeRLE(&cctx->seqStore) &&
- ZSTD_isRLE((BYTE const*)src, srcSize)) {
- /* We don't want to emit our first block as a RLE even if it qualifies because
- * doing so will cause the decoder (cli only) to throw a "should consume all input error."
- * This is only an issue for zstd <= v1.4.3
- */
+ ZSTD_isRLE(ip, blockSize)) {
+ /* Note: don't emit the first block as RLE even if it qualifies because
+ * doing so will cause the decoder (cli <= v1.4.3 only) to throw an (invalid) error
+ * "should consume all input error."
+ */
compressedSeqsSize = 1;
}
if (compressedSeqsSize == 0) {
/* ZSTD_noCompressBlock writes the block header as well */
cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
- FORWARD_IF_ERROR(cBlockSize, "Nocompress block failed");
- DEBUGLOG(4, "Writing out nocompress block, size: %zu", cBlockSize);
+ FORWARD_IF_ERROR(cBlockSize, "ZSTD_noCompressBlock failed");
+ DEBUGLOG(5, "Writing out nocompress block, size: %zu", cBlockSize);
} else if (compressedSeqsSize == 1) {
cBlockSize = ZSTD_rleCompressBlock(op, dstCapacity, *ip, blockSize, lastBlock);
- FORWARD_IF_ERROR(cBlockSize, "RLE compress block failed");
- DEBUGLOG(4, "Writing out RLE block, size: %zu", cBlockSize);
+ FORWARD_IF_ERROR(cBlockSize, "ZSTD_rleCompressBlock failed");
+ DEBUGLOG(5, "Writing out RLE block, size: %zu", cBlockSize);
} else {
U32 cBlockHeader;
/* Error checking and repcodes update */
@@ -5893,11 +6830,10 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
cBlockHeader = lastBlock + (((U32)bt_compressed)<<1) + (U32)(compressedSeqsSize << 3);
MEM_writeLE24(op, cBlockHeader);
cBlockSize = ZSTD_blockHeaderSize + compressedSeqsSize;
- DEBUGLOG(4, "Writing out compressed block, size: %zu", cBlockSize);
+ DEBUGLOG(5, "Writing out compressed block, size: %zu", cBlockSize);
}
cSize += cBlockSize;
- DEBUGLOG(4, "cSize running total: %zu", cSize);
if (lastBlock) {
break;
@@ -5908,41 +6844,50 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
dstCapacity -= cBlockSize;
cctx->isFirstBlock = 0;
}
+ DEBUGLOG(5, "cSize running total: %zu (remaining dstCapacity=%zu)", cSize, dstCapacity);
}
+ DEBUGLOG(4, "cSize final total: %zu", cSize);
return cSize;
}
-size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstCapacity,
+size_t ZSTD_compressSequences(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
const void* src, size_t srcSize)
{
BYTE* op = (BYTE*)dst;
size_t cSize = 0;
- size_t compressedBlocksSize = 0;
- size_t frameHeaderSize = 0;
/* Transparent initialization stage, same as compressStream2() */
- DEBUGLOG(3, "ZSTD_compressSequences()");
+ DEBUGLOG(4, "ZSTD_compressSequences (nbSeqs=%zu,dstCapacity=%zu)", inSeqsSize, dstCapacity);
assert(cctx != NULL);
FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, ZSTD_e_end, srcSize), "CCtx initialization failed");
+
/* Begin writing output, starting with frame header */
- frameHeaderSize = ZSTD_writeFrameHeader(op, dstCapacity, &cctx->appliedParams, srcSize, cctx->dictID);
- op += frameHeaderSize;
- dstCapacity -= frameHeaderSize;
- cSize += frameHeaderSize;
+ { size_t const frameHeaderSize = ZSTD_writeFrameHeader(op, dstCapacity,
+ &cctx->appliedParams, srcSize, cctx->dictID);
+ op += frameHeaderSize;
+ assert(frameHeaderSize <= dstCapacity);
+ dstCapacity -= frameHeaderSize;
+ cSize += frameHeaderSize;
+ }
if (cctx->appliedParams.fParams.checksumFlag && srcSize) {
xxh64_update(&cctx->xxhState, src, srcSize);
}
- /* cSize includes block header size and compressed sequences size */
- compressedBlocksSize = ZSTD_compressSequences_internal(cctx,
+
+ /* Now generate compressed blocks */
+ { size_t const cBlocksSize = ZSTD_compressSequences_internal(cctx,
op, dstCapacity,
inSeqs, inSeqsSize,
src, srcSize);
- FORWARD_IF_ERROR(compressedBlocksSize, "Compressing blocks failed!");
- cSize += compressedBlocksSize;
- dstCapacity -= compressedBlocksSize;
+ FORWARD_IF_ERROR(cBlocksSize, "Compressing blocks failed!");
+ cSize += cBlocksSize;
+ assert(cBlocksSize <= dstCapacity);
+ dstCapacity -= cBlocksSize;
+ }
+ /* Complete with frame checksum, if needed */
if (cctx->appliedParams.fParams.checksumFlag) {
U32 const checksum = (U32) xxh64_digest(&cctx->xxhState);
RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for checksum");
@@ -5951,26 +6896,557 @@ size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstCapaci
cSize += 4;
}
- DEBUGLOG(3, "Final compressed size: %zu", cSize);
+ DEBUGLOG(4, "Final compressed size: %zu", cSize);
+ return cSize;
+}
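As a usage aid, here is a hedged sketch of how a caller might drive the function above. It is not part of the patch; it uses the upstream zstd.h names (ZSTD_createCCtx, ZSTD_CCtx_setParameter, ZSTD_isError), assumes the sequence array was produced with explicit block delimiters, and elides error handling.

/* Sketch only: compress pre-computed sequences describing `src`. */
static size_t compress_with_sequences(void* dst, size_t dstCapacity,
                                      const ZSTD_Sequence* seqs, size_t nbSeqs,
                                      const void* src, size_t srcSize)
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    size_t cSize;
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_blockDelimiters,
                           ZSTD_sf_explicitBlockDelimiters);
    cSize = ZSTD_compressSequences(cctx, dst, dstCapacity,
                                   seqs, nbSeqs, src, srcSize);
    ZSTD_freeCCtx(cctx);
    return cSize;   /* may be an error code: check with ZSTD_isError() */
}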
+
+
+#if defined(__AVX2__)
+
+#include <immintrin.h> /* AVX2 intrinsics */
+
+/*
+ * Convert 2 sequences per iteration, using AVX2 intrinsics:
+ * - offset -> offBase = offset + ZSTD_REP_NUM
+ * - litLength -> (U16) litLength
+ * - matchLength -> (U16)(matchLength - 3)
+ * - rep is ignored
+ * Store only 8 bytes per SeqDef (offBase[4], litLength[2], mlBase[2]).
+ *
+ * At the end, instead of extracting two __m128i,
+ * we use _mm256_permute4x64_epi64(..., 0xE8) to move lane2 into lane1,
+ * then store the lower 16 bytes in one go.
+ *
+ * @returns 0 on success, with no long length detected
+ * @returns > 0 if there is one long length (> 65535),
+ * indicating its position and type.
+ */
+static size_t convertSequences_noRepcodes(
+ SeqDef* dstSeqs,
+ const ZSTD_Sequence* inSeqs,
+ size_t nbSequences)
+{
+ /*
+ * addition:
+ * For each 128-bit half: (offset+ZSTD_REP_NUM, litLength+0, matchLength-MINMATCH, rep+0)
+ */
+ const __m256i addition = _mm256_setr_epi32(
+ ZSTD_REP_NUM, 0, -MINMATCH, 0, /* for sequence i */
+ ZSTD_REP_NUM, 0, -MINMATCH, 0 /* for sequence i+1 */
+ );
+
+ /* limit: check if there is a long length */
+ const __m256i limit = _mm256_set1_epi32(65535);
+
+ /*
+ * shuffle mask for byte-level rearrangement in each 128-bit half:
+ *
+ * Input layout (after addition) per 128-bit half:
+ * [ offset+ZSTD_REP_NUM (4 bytes) | litLength (4 bytes) | matchLength-MINMATCH (4 bytes) | rep (4 bytes) ]
+ * We only need:
+ * offBase (4 bytes) = offset+ZSTD_REP_NUM
+ * litLength (2 bytes) = low 2 bytes of litLength
+ * mlBase (2 bytes) = low 2 bytes of (matchLength - MINMATCH)
+ * => Bytes [0..3, 4..5, 8..9], zero the rest.
+ */
+ const __m256i mask = _mm256_setr_epi8(
+ /* For the lower 128 bits => sequence i */
+ 0, 1, 2, 3, /* offBase (offset + ZSTD_REP_NUM) */
+ 4, 5, /* litLength (16 bits) */
+ 8, 9, /* matchLength (16 bits) */
+ (BYTE)0x80, (BYTE)0x80, (BYTE)0x80, (BYTE)0x80,
+ (BYTE)0x80, (BYTE)0x80, (BYTE)0x80, (BYTE)0x80,
+
+ /* For the upper 128 bits => sequence i+1 */
+ 16,17,18,19, /* offBase (offset + ZSTD_REP_NUM) */
+ 20,21, /* litLength */
+ 24,25, /* matchLength */
+ (BYTE)0x80, (BYTE)0x80, (BYTE)0x80, (BYTE)0x80,
+ (BYTE)0x80, (BYTE)0x80, (BYTE)0x80, (BYTE)0x80
+ );
+
+ /*
+ * Next, we'll use _mm256_permute4x64_epi64(vshf, 0xE8).
+ * Explanation of 0xE8 = 11101000b => [lane0, lane2, lane2, lane3].
+ * So the lower 128 bits become [lane0, lane2] => combining seq0 and seq1.
+ */
+#define PERM_LANE_0X_E8 0xE8 /* [0,2,2,3] in lane indices */
+
+ size_t longLen = 0, i = 0;
+
+ /* AVX permutation depends on the specific definition of target structures */
+ ZSTD_STATIC_ASSERT(sizeof(ZSTD_Sequence) == 16);
+ ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, offset) == 0);
+ ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, litLength) == 4);
+ ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, matchLength) == 8);
+ ZSTD_STATIC_ASSERT(sizeof(SeqDef) == 8);
+ ZSTD_STATIC_ASSERT(offsetof(SeqDef, offBase) == 0);
+ ZSTD_STATIC_ASSERT(offsetof(SeqDef, litLength) == 4);
+ ZSTD_STATIC_ASSERT(offsetof(SeqDef, mlBase) == 6);
+
+ /* Process 2 sequences per loop iteration */
+ for (; i + 1 < nbSequences; i += 2) {
+ /* Load 2 ZSTD_Sequence (32 bytes) */
+ __m256i vin = _mm256_loadu_si256((const __m256i*)(const void*)&inSeqs[i]);
+
+ /* Add {ZSTD_REP_NUM, 0, -MINMATCH, 0} in each 128-bit half */
+ __m256i vadd = _mm256_add_epi32(vin, addition);
+
+ /* Check for long length */
+ __m256i ll_cmp = _mm256_cmpgt_epi32(vadd, limit); /* 0xFFFFFFFF for element > 65535 */
+ int ll_res = _mm256_movemask_epi8(ll_cmp);
+
+ /* Shuffle bytes so each half gives us the 8 bytes we need */
+ __m256i vshf = _mm256_shuffle_epi8(vadd, mask);
+ /*
+ * Now:
+ * Lane0 = seq0's 8 bytes
+ * Lane1 = 0
+ * Lane2 = seq1's 8 bytes
+ * Lane3 = 0
+ */
+
+ /* Permute 64-bit lanes => move Lane2 down into Lane1. */
+ __m256i vperm = _mm256_permute4x64_epi64(vshf, PERM_LANE_0X_E8);
+ /*
+ * Now the lower 16 bytes (Lane0+Lane1) = [seq0, seq1].
+ * The upper 16 bytes are [Lane2, Lane3] = [seq1, 0], but we won't use them.
+ */
+
+ /* Store only the lower 16 bytes => 2 SeqDef (8 bytes each) */
+ _mm_storeu_si128((__m128i *)(void*)&dstSeqs[i], _mm256_castsi256_si128(vperm));
+ /*
+ * This writes out 16 bytes total:
+ * - offset 0..7 => seq0 (offBase, litLength, mlBase)
+ * - offset 8..15 => seq1 (offBase, litLength, mlBase)
+ */
+
+ /* check (unlikely) long lengths > 65535
+ * indices for lengths correspond to bits [4..7], [8..11], [20..23], [24..27]
+ * => combined mask = 0x0FF00FF0
+ */
+ if (UNLIKELY((ll_res & 0x0FF00FF0) != 0)) {
+ /* long length detected: let's figure out which one */
+ if (inSeqs[i].matchLength > 65535+MINMATCH) {
+ assert(longLen == 0);
+ longLen = i + 1;
+ }
+ if (inSeqs[i].litLength > 65535) {
+ assert(longLen == 0);
+ longLen = i + nbSequences + 1;
+ }
+ if (inSeqs[i+1].matchLength > 65535+MINMATCH) {
+ assert(longLen == 0);
+ longLen = i + 1 + 1;
+ }
+ if (inSeqs[i+1].litLength > 65535) {
+ assert(longLen == 0);
+ longLen = i + 1 + nbSequences + 1;
+ }
+ }
+ }
+
+ /* Handle leftover if @nbSequences is odd */
+ if (i < nbSequences) {
+ /* process last sequence */
+ assert(i == nbSequences - 1);
+ dstSeqs[i].offBase = OFFSET_TO_OFFBASE(inSeqs[i].offset);
+ dstSeqs[i].litLength = (U16)inSeqs[i].litLength;
+ dstSeqs[i].mlBase = (U16)(inSeqs[i].matchLength - MINMATCH);
+ /* check (unlikely) long lengths > 65535 */
+ if (UNLIKELY(inSeqs[i].matchLength > 65535+MINMATCH)) {
+ assert(longLen == 0);
+ longLen = i + 1;
+ }
+ if (UNLIKELY(inSeqs[i].litLength > 65535)) {
+ assert(longLen == 0);
+ longLen = i + nbSequences + 1;
+ }
+ }
+
+ return longLen;
+}
+
+/* the vector implementation could also be ported to SSSE3,
+ * but since this implementation is targeting modern systems (>= Sapphire Rapids),
+ * it's not useful to develop and maintain code for older pre-AVX2 platforms */
+
+#else /* no AVX2 */
+
+static size_t convertSequences_noRepcodes(
+ SeqDef* dstSeqs,
+ const ZSTD_Sequence* inSeqs,
+ size_t nbSequences)
+{
+ size_t longLen = 0;
+ size_t n;
+ for (n=0; n<nbSequences; n++) {
+ dstSeqs[n].offBase = OFFSET_TO_OFFBASE(inSeqs[n].offset);
+ dstSeqs[n].litLength = (U16)inSeqs[n].litLength;
+ dstSeqs[n].mlBase = (U16)(inSeqs[n].matchLength - MINMATCH);
+ /* check for long length > 65535 */
+ if (UNLIKELY(inSeqs[n].matchLength > 65535+MINMATCH)) {
+ assert(longLen == 0);
+ longLen = n + 1;
+ }
+ if (UNLIKELY(inSeqs[n].litLength > 65535)) {
+ assert(longLen == 0);
+ longLen = n + nbSequences + 1;
+ }
+ }
+ return longLen;
+}
+
+#endif
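To make the conversion above concrete, a minimal worked example (not part of the patch; values invented). It assumes the definitions from zstd_compress_internal.h (ZSTD_Sequence, SeqDef, OFFSET_TO_OFFBASE, MINMATCH) are in scope, as they are in this file.

/* Illustration only: the per-sequence mapping, spelled out once
 * (ZSTD_REP_NUM == 3, MINMATCH == 3). */
static void convert_one_example(void)
{
    ZSTD_Sequence const in = { 100, 5, 8, 0 };  /* offset, litLength, matchLength, rep */
    SeqDef out;
    out.offBase   = OFFSET_TO_OFFBASE(in.offset);      /* 100 + 3 = 103 */
    out.litLength = (U16)in.litLength;                 /* 5 */
    out.mlBase    = (U16)(in.matchLength - MINMATCH);  /* 8 - 3 = 5 */
    (void)out;
    /* A litLength > 65535 or a matchLength > 65535+MINMATCH does not fit the
     * U16 fields; convertSequences_noRepcodes() reports its position via a
     * non-zero return value so the caller can set longLengthType/Pos. */
}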
+
+/*
+ * Precondition: Sequences must end on an explicit Block Delimiter
+ * @return: 0 on success, or an error code.
+ * Note: Sequence validation functionality has been disabled (removed).
+ * This helps keep the main pipeline lean, improving performance.
+ * It may be re-inserted later.
+ */
+size_t ZSTD_convertBlockSequences(ZSTD_CCtx* cctx,
+ const ZSTD_Sequence* const inSeqs, size_t nbSequences,
+ int repcodeResolution)
+{
+ Repcodes_t updatedRepcodes;
+ size_t seqNb = 0;
+
+ DEBUGLOG(5, "ZSTD_convertBlockSequences (nbSequences = %zu)", nbSequences);
+
+ RETURN_ERROR_IF(nbSequences >= cctx->seqStore.maxNbSeq, externalSequences_invalid,
+ "Not enough memory allocated. Try adjusting ZSTD_c_minMatch.");
+
+ ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(Repcodes_t));
+
+ /* check end condition */
+ assert(nbSequences >= 1);
+ assert(inSeqs[nbSequences-1].matchLength == 0);
+ assert(inSeqs[nbSequences-1].offset == 0);
+
+ /* Convert Sequences from public format to internal format */
+ if (!repcodeResolution) {
+ size_t const longl = convertSequences_noRepcodes(cctx->seqStore.sequencesStart, inSeqs, nbSequences-1);
+ cctx->seqStore.sequences = cctx->seqStore.sequencesStart + nbSequences-1;
+ if (longl) {
+ DEBUGLOG(5, "long length");
+ assert(cctx->seqStore.longLengthType == ZSTD_llt_none);
+ if (longl <= nbSequences-1) {
+ DEBUGLOG(5, "long match length detected at pos %zu", longl-1);
+ cctx->seqStore.longLengthType = ZSTD_llt_matchLength;
+ cctx->seqStore.longLengthPos = (U32)(longl-1);
+ } else {
+ DEBUGLOG(5, "long literals length detected at pos %zu", longl-nbSequences);
+ assert(longl <= 2* (nbSequences-1));
+ cctx->seqStore.longLengthType = ZSTD_llt_literalLength;
+ cctx->seqStore.longLengthPos = (U32)(longl-(nbSequences-1)-1);
+ }
+ }
+ } else {
+ for (seqNb = 0; seqNb < nbSequences - 1 ; seqNb++) {
+ U32 const litLength = inSeqs[seqNb].litLength;
+ U32 const matchLength = inSeqs[seqNb].matchLength;
+ U32 const ll0 = (litLength == 0);
+ U32 const offBase = ZSTD_finalizeOffBase(inSeqs[seqNb].offset, updatedRepcodes.rep, ll0);
+
+ DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offBase, matchLength, litLength);
+ ZSTD_storeSeqOnly(&cctx->seqStore, litLength, offBase, matchLength);
+ ZSTD_updateRep(updatedRepcodes.rep, offBase, ll0);
+ }
+ }
+
+ /* If we skipped repcode search while parsing, we need to update repcodes now */
+ if (!repcodeResolution && nbSequences > 1) {
+ U32* const rep = updatedRepcodes.rep;
+
+ if (nbSequences >= 4) {
+ U32 lastSeqIdx = (U32)nbSequences - 2; /* index of last full sequence */
+ rep[2] = inSeqs[lastSeqIdx - 2].offset;
+ rep[1] = inSeqs[lastSeqIdx - 1].offset;
+ rep[0] = inSeqs[lastSeqIdx].offset;
+ } else if (nbSequences == 3) {
+ rep[2] = rep[0];
+ rep[1] = inSeqs[0].offset;
+ rep[0] = inSeqs[1].offset;
+ } else {
+ assert(nbSequences == 2);
+ rep[2] = rep[1];
+ rep[1] = rep[0];
+ rep[0] = inSeqs[0].offset;
+ }
+ }
+
+ ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(Repcodes_t));
+
+ return 0;
+}
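A small illustration (not part of the patch; values invented) of the fast-path repcode update above, for the nbSequences >= 4 branch:

/* Illustration only: history left behind by a block of 4 full sequences
 * followed by a delimiter (nbSequences == 5, so lastSeqIdx == 3). */
static void repcode_update_example(void)
{
    static const ZSTD_Sequence seqs[5] = {
        { 90, 1, 4, 0 }, { 80, 1, 4, 0 }, { 70, 1, 4, 0 }, { 60, 1, 4, 0 },
        {  0, 0, 0, 0 }   /* block delimiter */
    };
    U32 rep[3];
    rep[0] = seqs[3].offset;   /* 60: most recent offset */
    rep[1] = seqs[2].offset;   /* 70 */
    rep[2] = seqs[1].offset;   /* 80 */
    (void)rep;
    /* i.e. the raw offsets of the last three full sequences, newest first;
     * repcode substitutions are deliberately ignored on this fast path. */
}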
+
+#if defined(ZSTD_ARCH_X86_AVX2)
+
+BlockSummary ZSTD_get1BlockSummary(const ZSTD_Sequence* seqs, size_t nbSeqs)
+{
+ size_t i;
+ __m256i const zeroVec = _mm256_setzero_si256();
+ __m256i sumVec = zeroVec; /* accumulates match+lit in 32-bit lanes */
+ ZSTD_ALIGNED(32) U32 tmp[8]; /* temporary buffer for reduction */
+ size_t mSum = 0, lSum = 0;
+ ZSTD_STATIC_ASSERT(sizeof(ZSTD_Sequence) == 16);
+
+ /* Process 2 structs (32 bytes) at a time */
+ for (i = 0; i + 2 <= nbSeqs; i += 2) {
+ /* Load two consecutive ZSTD_Sequence (8×4 = 32 bytes) */
+ __m256i data = _mm256_loadu_si256((const __m256i*)(const void*)&seqs[i]);
+ /* check end of block signal */
+ __m256i cmp = _mm256_cmpeq_epi32(data, zeroVec);
+ int cmp_res = _mm256_movemask_epi8(cmp);
+ /* indices for match lengths correspond to bits [8..11], [24..27]
+ * => combined mask = 0x0F000F00 */
+ ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, matchLength) == 8);
+ if (cmp_res & 0x0F000F00) break;
+ /* Accumulate in sumVec */
+ sumVec = _mm256_add_epi32(sumVec, data);
+ }
+
+ /* Horizontal reduction */
+ _mm256_store_si256((__m256i*)tmp, sumVec);
+ lSum = tmp[1] + tmp[5];
+ mSum = tmp[2] + tmp[6];
+
+ /* Handle the leftover */
+ for (; i < nbSeqs; i++) {
+ lSum += seqs[i].litLength;
+ mSum += seqs[i].matchLength;
+ if (seqs[i].matchLength == 0) break; /* end of block */
+ }
+
+ if (i==nbSeqs) {
+ /* reaching end of sequences: end of block signal was not present */
+ BlockSummary bs;
+ bs.nbSequences = ERROR(externalSequences_invalid);
+ return bs;
+ }
+ { BlockSummary bs;
+ bs.nbSequences = i+1;
+ bs.blockSize = lSum + mSum;
+ bs.litSize = lSum;
+ return bs;
+ }
+}
+
+#else
+
+BlockSummary ZSTD_get1BlockSummary(const ZSTD_Sequence* seqs, size_t nbSeqs)
+{
+ size_t totalMatchSize = 0;
+ size_t litSize = 0;
+ size_t n;
+ assert(seqs);
+ for (n=0; n<nbSeqs; n++) {
+ totalMatchSize += seqs[n].matchLength;
+ litSize += seqs[n].litLength;
+ if (seqs[n].matchLength == 0) {
+ assert(seqs[n].offset == 0);
+ break;
+ }
+ }
+ if (n==nbSeqs) {
+ BlockSummary bs;
+ bs.nbSequences = ERROR(externalSequences_invalid);
+ return bs;
+ }
+ { BlockSummary bs;
+ bs.nbSequences = n+1;
+ bs.blockSize = litSize + totalMatchSize;
+ bs.litSize = litSize;
+ return bs;
+ }
+}
+#endif
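A short illustration (not part of the patch; values invented) of what ZSTD_get1BlockSummary() reports for a delimited block; it assumes the BlockSummary type is visible, as it is in this translation unit.

/* Illustration only. */
static void block_summary_example(void)
{
    static const ZSTD_Sequence seqs[4] = {
        { 300, 11, 6, 0 },
        { 150,  2, 9, 0 },
        {   0,  7, 0, 0 },   /* end-of-block delimiter, 7 trailing literals */
        { 999,  1, 4, 0 },   /* first sequence of the next block, not counted */
    };
    BlockSummary const bs = ZSTD_get1BlockSummary(seqs, 4);
    /* bs.nbSequences == 3 (delimiter included),
     * bs.litSize == 11 + 2 + 7 == 20,
     * bs.blockSize == 20 + (6 + 9) == 35. */
    (void)bs;
}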
+
+
+static size_t
+ZSTD_compressSequencesAndLiterals_internal(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const ZSTD_Sequence* inSeqs, size_t nbSequences,
+ const void* literals, size_t litSize, size_t srcSize)
+{
+ size_t remaining = srcSize;
+ size_t cSize = 0;
+ BYTE* op = (BYTE*)dst;
+ int const repcodeResolution = (cctx->appliedParams.searchForExternalRepcodes == ZSTD_ps_enable);
+ assert(cctx->appliedParams.searchForExternalRepcodes != ZSTD_ps_auto);
+
+ DEBUGLOG(4, "ZSTD_compressSequencesAndLiterals_internal: nbSeqs=%zu, litSize=%zu", nbSequences, litSize);
+ RETURN_ERROR_IF(nbSequences == 0, externalSequences_invalid, "Requires at least 1 end-of-block");
+
+ /* Special case: empty frame */
+ if ((nbSequences == 1) && (inSeqs[0].litLength == 0)) {
+ U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1);
+ RETURN_ERROR_IF(dstCapacity<3, dstSize_tooSmall, "No room for empty frame block header");
+ MEM_writeLE24(op, cBlockHeader24);
+ op += ZSTD_blockHeaderSize;
+ dstCapacity -= ZSTD_blockHeaderSize;
+ cSize += ZSTD_blockHeaderSize;
+ }
+
+ while (nbSequences) {
+ size_t compressedSeqsSize, cBlockSize, conversionStatus;
+ BlockSummary const block = ZSTD_get1BlockSummary(inSeqs, nbSequences);
+ U32 const lastBlock = (block.nbSequences == nbSequences);
+ FORWARD_IF_ERROR(block.nbSequences, "Error while trying to determine nb of sequences for a block");
+ assert(block.nbSequences <= nbSequences);
+ RETURN_ERROR_IF(block.litSize > litSize, externalSequences_invalid, "discrepancy: Sequences require more literals than present in buffer");
+ ZSTD_resetSeqStore(&cctx->seqStore);
+
+ conversionStatus = ZSTD_convertBlockSequences(cctx,
+ inSeqs, block.nbSequences,
+ repcodeResolution);
+ FORWARD_IF_ERROR(conversionStatus, "Bad sequence conversion");
+ inSeqs += block.nbSequences;
+ nbSequences -= block.nbSequences;
+ remaining -= block.blockSize;
+
+ /* Note: when blockSize is very small, the other variants send it uncompressed.
+ * Here, we still send the sequences, because we don't have the original source to send uncompressed.
+ * One could in theory reproduce the source from the sequences,
+ * but that's complex and memory intensive, and goes against the objectives of this variant. */
+
+ RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall, "not enough dstCapacity to write a new compressed block");
+
+ compressedSeqsSize = ZSTD_entropyCompressSeqStore_internal(
+ op + ZSTD_blockHeaderSize /* Leave space for block header */, dstCapacity - ZSTD_blockHeaderSize,
+ literals, block.litSize,
+ &cctx->seqStore,
+ &cctx->blockState.prevCBlock->entropy, &cctx->blockState.nextCBlock->entropy,
+ &cctx->appliedParams,
+ cctx->tmpWorkspace, cctx->tmpWkspSize /* statically allocated in resetCCtx */,
+ cctx->bmi2);
+ FORWARD_IF_ERROR(compressedSeqsSize, "Compressing sequences of block failed");
+ /* note: the spec forbids any compressed block from being larger than the maximum block size */
+ if (compressedSeqsSize > cctx->blockSizeMax) compressedSeqsSize = 0;
+ DEBUGLOG(5, "Compressed sequences size: %zu", compressedSeqsSize);
+ litSize -= block.litSize;
+ literals = (const char*)literals + block.litSize;
+
+ /* Note: difficult to check source for RLE block when only Literals are provided,
+ * but it could be considered from analyzing the sequence directly */
+
+ if (compressedSeqsSize == 0) {
+ /* Sending uncompressed blocks is out of reach, because the source is not provided.
+ * In theory, one could use the sequences to regenerate the source, like a decompressor,
+ * but that's complex and memory hungry, and defeats the purpose of this variant.
+ * Current outcome: generate an error code.
+ */
+ RETURN_ERROR(cannotProduce_uncompressedBlock, "ZSTD_compressSequencesAndLiterals cannot generate an uncompressed block");
+ } else {
+ U32 cBlockHeader;
+ assert(compressedSeqsSize > 1); /* no RLE */
+ /* Error checking and repcodes update */
+ ZSTD_blockState_confirmRepcodesAndEntropyTables(&cctx->blockState);
+ if (cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
+ cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
+
+ /* Write block header into beginning of block*/
+ cBlockHeader = lastBlock + (((U32)bt_compressed)<<1) + (U32)(compressedSeqsSize << 3);
+ MEM_writeLE24(op, cBlockHeader);
+ cBlockSize = ZSTD_blockHeaderSize + compressedSeqsSize;
+ DEBUGLOG(5, "Writing out compressed block, size: %zu", cBlockSize);
+ }
+
+ cSize += cBlockSize;
+ op += cBlockSize;
+ dstCapacity -= cBlockSize;
+ cctx->isFirstBlock = 0;
+ DEBUGLOG(5, "cSize running total: %zu (remaining dstCapacity=%zu)", cSize, dstCapacity);
+
+ if (lastBlock) {
+ assert(nbSequences == 0);
+ break;
+ }
+ }
+
+ RETURN_ERROR_IF(litSize != 0, externalSequences_invalid, "literals must be entirely and exactly consumed");
+ RETURN_ERROR_IF(remaining != 0, externalSequences_invalid, "Sequences must represent a total of exactly srcSize=%zu", srcSize);
+ DEBUGLOG(4, "cSize final total: %zu", cSize);
+ return cSize;
+}
+
+size_t
+ZSTD_compressSequencesAndLiterals(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
+ const void* literals, size_t litSize, size_t litCapacity,
+ size_t decompressedSize)
+{
+ BYTE* op = (BYTE*)dst;
+ size_t cSize = 0;
+
+ /* Transparent initialization stage, same as compressStream2() */
+ DEBUGLOG(4, "ZSTD_compressSequencesAndLiterals (dstCapacity=%zu)", dstCapacity);
+ assert(cctx != NULL);
+ if (litCapacity < litSize) {
+ RETURN_ERROR(workSpace_tooSmall, "literals buffer is not large enough: must be at least 8 bytes larger than litSize (risk of read out-of-bound)");
+ }
+ FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, ZSTD_e_end, decompressedSize), "CCtx initialization failed");
+
+ if (cctx->appliedParams.blockDelimiters == ZSTD_sf_noBlockDelimiters) {
+ RETURN_ERROR(frameParameter_unsupported, "This mode is only compatible with explicit delimiters");
+ }
+ if (cctx->appliedParams.validateSequences) {
+ RETURN_ERROR(parameter_unsupported, "This mode is not compatible with Sequence validation");
+ }
+ if (cctx->appliedParams.fParams.checksumFlag) {
+ RETURN_ERROR(frameParameter_unsupported, "this mode is not compatible with frame checksum");
+ }
+
+ /* Begin writing output, starting with frame header */
+ { size_t const frameHeaderSize = ZSTD_writeFrameHeader(op, dstCapacity,
+ &cctx->appliedParams, decompressedSize, cctx->dictID);
+ op += frameHeaderSize;
+ assert(frameHeaderSize <= dstCapacity);
+ dstCapacity -= frameHeaderSize;
+ cSize += frameHeaderSize;
+ }
+
+ /* Now generate compressed blocks */
+ { size_t const cBlocksSize = ZSTD_compressSequencesAndLiterals_internal(cctx,
+ op, dstCapacity,
+ inSeqs, inSeqsSize,
+ literals, litSize, decompressedSize);
+ FORWARD_IF_ERROR(cBlocksSize, "Compressing blocks failed!");
+ cSize += cBlocksSize;
+ assert(cBlocksSize <= dstCapacity);
+ dstCapacity -= cBlocksSize;
+ }
+
+ DEBUGLOG(4, "Final compressed size: %zu", cSize);
return cSize;
}
/*====== Finalize ======*/
+static ZSTD_inBuffer inBuffer_forEndFlush(const ZSTD_CStream* zcs)
+{
+ const ZSTD_inBuffer nullInput = { NULL, 0, 0 };
+ const int stableInput = (zcs->appliedParams.inBufferMode == ZSTD_bm_stable);
+ return stableInput ? zcs->expectedInBuffer : nullInput;
+}
+
/*! ZSTD_flushStream() :
* @return : amount of data remaining to flush */
size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
{
- ZSTD_inBuffer input = { NULL, 0, 0 };
+ ZSTD_inBuffer input = inBuffer_forEndFlush(zcs);
+ input.size = input.pos; /* do not ingest more input during flush */
return ZSTD_compressStream2(zcs, output, &input, ZSTD_e_flush);
}
-
size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
{
- ZSTD_inBuffer input = { NULL, 0, 0 };
+ ZSTD_inBuffer input = inBuffer_forEndFlush(zcs);
size_t const remainingToFlush = ZSTD_compressStream2(zcs, output, &input, ZSTD_e_end);
- FORWARD_IF_ERROR( remainingToFlush , "ZSTD_compressStream2 failed");
+ FORWARD_IF_ERROR(remainingToFlush , "ZSTD_compressStream2(,,ZSTD_e_end) failed");
if (zcs->appliedParams.nbWorkers > 0) return remainingToFlush; /* minimal estimation */
/* single thread mode : attempt to calculate remaining to flush more precisely */
{ size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE;
@@ -6046,7 +7522,7 @@ static void ZSTD_dedicatedDictSearch_revertCParams(
}
}
-static U64 ZSTD_getCParamRowSize(U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
+static U64 ZSTD_getCParamRowSize(U64 srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode)
{
switch (mode) {
case ZSTD_cpm_unknown:
@@ -6070,8 +7546,8 @@ static U64 ZSTD_getCParamRowSize(U64 srcSizeHint, size_t dictSize, ZSTD_cParamMo
* @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
* Note: srcSizeHint 0 means 0, use ZSTD_CONTENTSIZE_UNKNOWN for unknown.
* Use dictSize == 0 for unknown or unused.
- * Note: `mode` controls how we treat the `dictSize`. See docs for `ZSTD_cParamMode_e`. */
-static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
+ * Note: `mode` controls how we treat the `dictSize`. See docs for `ZSTD_CParamMode_e`. */
+static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode)
{
U64 const rSize = ZSTD_getCParamRowSize(srcSizeHint, dictSize, mode);
U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB);
@@ -6092,7 +7568,7 @@ static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel,
cp.targetLength = (unsigned)(-clampedCompressionLevel);
}
/* refine parameters based on srcSize & dictSize */
- return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize, mode);
+ return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize, mode, ZSTD_ps_auto);
}
}
@@ -6109,7 +7585,9 @@ ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long l
* same idea as ZSTD_getCParams()
* @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).
* Fields of `ZSTD_frameParameters` are set to default values */
-static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode) {
+static ZSTD_parameters
+ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode)
+{
ZSTD_parameters params;
ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, mode);
DEBUGLOG(5, "ZSTD_getParams (cLevel=%i)", compressionLevel);
@@ -6123,7 +7601,34 @@ static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned lo
* same idea as ZSTD_getCParams()
* @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).
* Fields of `ZSTD_frameParameters` are set to default values */
-ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) {
+ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize)
+{
if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN;
return ZSTD_getParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_cpm_unknown);
}
+
+void ZSTD_registerSequenceProducer(
+ ZSTD_CCtx* zc,
+ void* extSeqProdState,
+ ZSTD_sequenceProducer_F extSeqProdFunc)
+{
+ assert(zc != NULL);
+ ZSTD_CCtxParams_registerSequenceProducer(
+ &zc->requestedParams, extSeqProdState, extSeqProdFunc
+ );
+}
+
+void ZSTD_CCtxParams_registerSequenceProducer(
+ ZSTD_CCtx_params* params,
+ void* extSeqProdState,
+ ZSTD_sequenceProducer_F extSeqProdFunc)
+{
+ assert(params != NULL);
+ if (extSeqProdFunc != NULL) {
+ params->extSeqProdFunc = extSeqProdFunc;
+ params->extSeqProdState = extSeqProdState;
+ } else {
+ params->extSeqProdFunc = NULL;
+ params->extSeqProdState = NULL;
+ }
+}
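As a usage aid (not part of the patch): a hedged sketch of registering an external sequence producer. The callback signature, ZSTD_SEQUENCE_PRODUCER_ERROR, and ZSTD_c_enableSeqProducerFallback are taken from the upstream zstd.h documentation and should be treated as assumptions here.

/* Sketch only: a producer that always declines, relying on the fallback
 * to the internal matchfinders. */
static size_t nullSeqProducer(void* state,
                              ZSTD_Sequence* outSeqs, size_t outSeqsCapacity,
                              const void* src, size_t srcSize,
                              const void* dict, size_t dictSize,
                              int compressionLevel, size_t windowSize)
{
    (void)state; (void)outSeqs; (void)outSeqsCapacity;
    (void)src; (void)srcSize; (void)dict; (void)dictSize;
    (void)compressionLevel; (void)windowSize;
    return ZSTD_SEQUENCE_PRODUCER_ERROR;   /* "no sequences produced" */
}

/* Registration, on an existing cctx:
 *   ZSTD_registerSequenceProducer(cctx, NULL, nullSeqProducer);
 *   ZSTD_CCtx_setParameter(cctx, ZSTD_c_enableSeqProducerFallback, 1);
 */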
diff --git a/lib/zstd/compress/zstd_compress_internal.h b/lib/zstd/compress/zstd_compress_internal.h
index 71697a11ae30..b10978385876 100644
--- a/lib/zstd/compress/zstd_compress_internal.h
+++ b/lib/zstd/compress/zstd_compress_internal.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -20,7 +21,8 @@
***************************************/
#include "../common/zstd_internal.h"
#include "zstd_cwksp.h"
-
+#include "../common/bits.h" /* ZSTD_highbit32, ZSTD_NbCommonBytes */
+#include "zstd_preSplit.h" /* ZSTD_SLIPBLOCK_WORKSPACESIZE */
/*-*************************************
* Constants
@@ -32,7 +34,7 @@
It's not a big deal though : candidate will just be sorted again.
Additionally, candidate position 1 will be lost.
But candidate 1 cannot hide a large tree of candidates, so it's a minimal loss.
- The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be mishandled after table re-use with a different strategy.
+ The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be mishandled after table reuse with a different strategy.
This constant is required by ZSTD_compressBlock_btlazy2() and ZSTD_reduceTable_internal() */
@@ -76,6 +78,70 @@ typedef struct {
} ZSTD_entropyCTables_t;
/* *********************************************
+* Sequences *
+***********************************************/
+typedef struct SeqDef_s {
+ U32 offBase; /* offBase == Offset + ZSTD_REP_NUM, or repcode 1,2,3 */
+ U16 litLength;
+ U16 mlBase; /* mlBase == matchLength - MINMATCH */
+} SeqDef;
+
+/* Controls whether seqStore has a single "long" litLength or matchLength. See SeqStore_t. */
+typedef enum {
+ ZSTD_llt_none = 0, /* no longLengthType */
+ ZSTD_llt_literalLength = 1, /* represents a long literal */
+ ZSTD_llt_matchLength = 2 /* represents a long match */
+} ZSTD_longLengthType_e;
+
+typedef struct {
+ SeqDef* sequencesStart;
+ SeqDef* sequences; /* ptr to end of sequences */
+ BYTE* litStart;
+ BYTE* lit; /* ptr to end of literals */
+ BYTE* llCode;
+ BYTE* mlCode;
+ BYTE* ofCode;
+ size_t maxNbSeq;
+ size_t maxNbLit;
+
+ /* longLengthPos and longLengthType to allow us to represent either a single litLength or matchLength
+ * in the seqStore that has a value larger than U16 (if it exists). To do so, we increment
+ * the existing value of the litLength or matchLength by 0x10000.
+ */
+ ZSTD_longLengthType_e longLengthType;
+ U32 longLengthPos; /* Index of the sequence to apply long length modification to */
+} SeqStore_t;
+
+typedef struct {
+ U32 litLength;
+ U32 matchLength;
+} ZSTD_SequenceLength;
+
+/*
+ * Returns the ZSTD_SequenceLength for the given sequence. It handles decoding of the long length
+ * indicated by longLengthPos and longLengthType, and adds MINMATCH back to matchLength.
+ */
+MEM_STATIC ZSTD_SequenceLength ZSTD_getSequenceLength(SeqStore_t const* seqStore, SeqDef const* seq)
+{
+ ZSTD_SequenceLength seqLen;
+ seqLen.litLength = seq->litLength;
+ seqLen.matchLength = seq->mlBase + MINMATCH;
+ if (seqStore->longLengthPos == (U32)(seq - seqStore->sequencesStart)) {
+ if (seqStore->longLengthType == ZSTD_llt_literalLength) {
+ seqLen.litLength += 0x10000;
+ }
+ if (seqStore->longLengthType == ZSTD_llt_matchLength) {
+ seqLen.matchLength += 0x10000;
+ }
+ }
+ return seqLen;
+}
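A brief worked example (not part of the patch) of the long-length adjustment above:

/* If a SeqDef stores mlBase == 0x0200 and the seqStore marks it as the
 * single long match (longLengthType == ZSTD_llt_matchLength, with
 * longLengthPos pointing at it), ZSTD_getSequenceLength() returns
 *   matchLength = 0x0200 + MINMATCH + 0x10000
 * while litLength is returned as stored. */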
+
+const SeqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx); /* compress & dictBuilder */
+int ZSTD_seqToCodes(const SeqStore_t* seqStorePtr); /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */
+
+
+/* *********************************************
* Entropy buffer statistics structs and funcs *
***********************************************/
/* ZSTD_hufCTablesMetadata_t :
@@ -84,7 +150,7 @@ typedef struct {
* hufDesSize refers to the size of huffman tree description in bytes.
* This metadata is populated in ZSTD_buildBlockEntropyStats_literals() */
typedef struct {
- symbolEncodingType_e hType;
+ SymbolEncodingType_e hType;
BYTE hufDesBuffer[ZSTD_MAX_HUF_HEADER_SIZE];
size_t hufDesSize;
} ZSTD_hufCTablesMetadata_t;
@@ -95,9 +161,9 @@ typedef struct {
* fseTablesSize refers to the size of fse tables in bytes.
* This metadata is populated in ZSTD_buildBlockEntropyStats_sequences() */
typedef struct {
- symbolEncodingType_e llType;
- symbolEncodingType_e ofType;
- symbolEncodingType_e mlType;
+ SymbolEncodingType_e llType;
+ SymbolEncodingType_e ofType;
+ SymbolEncodingType_e mlType;
BYTE fseTablesBuffer[ZSTD_MAX_FSE_HEADERS_SIZE];
size_t fseTablesSize;
size_t lastCountSize; /* This is to account for bug in 1.3.4. More detail in ZSTD_entropyCompressSeqStore_internal() */
@@ -111,12 +177,13 @@ typedef struct {
/* ZSTD_buildBlockEntropyStats() :
* Builds entropy for the block.
* @return : 0 on success or error code */
-size_t ZSTD_buildBlockEntropyStats(seqStore_t* seqStorePtr,
- const ZSTD_entropyCTables_t* prevEntropy,
- ZSTD_entropyCTables_t* nextEntropy,
- const ZSTD_CCtx_params* cctxParams,
- ZSTD_entropyCTablesMetadata_t* entropyMetadata,
- void* workspace, size_t wkspSize);
+size_t ZSTD_buildBlockEntropyStats(
+ const SeqStore_t* seqStorePtr,
+ const ZSTD_entropyCTables_t* prevEntropy,
+ ZSTD_entropyCTables_t* nextEntropy,
+ const ZSTD_CCtx_params* cctxParams,
+ ZSTD_entropyCTablesMetadata_t* entropyMetadata,
+ void* workspace, size_t wkspSize);
/* *******************************
* Compression internals structs *
@@ -140,28 +207,29 @@ typedef struct {
stopped. posInSequence <= seq[pos].litLength + seq[pos].matchLength */
size_t size; /* The number of sequences. <= capacity. */
size_t capacity; /* The capacity starting from `seq` pointer */
-} rawSeqStore_t;
+} RawSeqStore_t;
-UNUSED_ATTR static const rawSeqStore_t kNullRawSeqStore = {NULL, 0, 0, 0, 0};
+UNUSED_ATTR static const RawSeqStore_t kNullRawSeqStore = {NULL, 0, 0, 0, 0};
typedef struct {
- int price;
- U32 off;
- U32 mlen;
- U32 litlen;
- U32 rep[ZSTD_REP_NUM];
+ int price; /* price from beginning of segment to this position */
+ U32 off; /* offset of previous match */
+ U32 mlen; /* length of previous match */
+ U32 litlen; /* nb of literals since previous match */
+ U32 rep[ZSTD_REP_NUM]; /* offset history after previous match */
} ZSTD_optimal_t;
typedef enum { zop_dynamic=0, zop_predef } ZSTD_OptPrice_e;
+#define ZSTD_OPT_SIZE (ZSTD_OPT_NUM+3)
typedef struct {
/* All tables are allocated inside cctx->workspace by ZSTD_resetCCtx_internal() */
unsigned* litFreq; /* table of literals statistics, of size 256 */
unsigned* litLengthFreq; /* table of litLength statistics, of size (MaxLL+1) */
unsigned* matchLengthFreq; /* table of matchLength statistics, of size (MaxML+1) */
unsigned* offCodeFreq; /* table of offCode statistics, of size (MaxOff+1) */
- ZSTD_match_t* matchTable; /* list of found matches, of size ZSTD_OPT_NUM+1 */
- ZSTD_optimal_t* priceTable; /* All positions tracked by optimal parser, of size ZSTD_OPT_NUM+1 */
+ ZSTD_match_t* matchTable; /* list of found matches, of size ZSTD_OPT_SIZE */
+ ZSTD_optimal_t* priceTable; /* All positions tracked by optimal parser, of size ZSTD_OPT_SIZE */
U32 litSum; /* nb of literals */
U32 litLengthSum; /* nb of litLength codes */
@@ -173,7 +241,7 @@ typedef struct {
U32 offCodeSumBasePrice; /* to compare to log2(offreq) */
ZSTD_OptPrice_e priceType; /* prices can be determined dynamically, or follow a pre-defined cost structure */
const ZSTD_entropyCTables_t* symbolCosts; /* pre-calculated dictionary statistics */
- ZSTD_paramSwitch_e literalCompressionMode;
+ ZSTD_ParamSwitch_e literalCompressionMode;
} optState_t;
typedef struct {
@@ -195,11 +263,11 @@ typedef struct {
#define ZSTD_WINDOW_START_INDEX 2
-typedef struct ZSTD_matchState_t ZSTD_matchState_t;
+typedef struct ZSTD_MatchState_t ZSTD_MatchState_t;
#define ZSTD_ROW_HASH_CACHE_SIZE 8 /* Size of prefetching hash cache for row-based matchfinder */
-struct ZSTD_matchState_t {
+struct ZSTD_MatchState_t {
ZSTD_window_t window; /* State for window round buffer management */
U32 loadedDictEnd; /* index of end of dictionary, within context's referential.
* When loadedDictEnd != 0, a dictionary is in use, and still valid.
@@ -212,28 +280,42 @@ struct ZSTD_matchState_t {
U32 hashLog3; /* dispatch table for matches of len==3 : larger == faster, more memory */
U32 rowHashLog; /* For row-based matchfinder: Hashlog based on nb of rows in the hashTable.*/
- U16* tagTable; /* For row-based matchFinder: A row-based table containing the hashes and head index. */
+ BYTE* tagTable; /* For row-based matchFinder: A row-based table containing the hashes and head index. */
U32 hashCache[ZSTD_ROW_HASH_CACHE_SIZE]; /* For row-based matchFinder: a cache of hashes to improve speed */
+ U64 hashSalt; /* For row-based matchFinder: salts the hash for reuse of tag table */
+ U32 hashSaltEntropy; /* For row-based matchFinder: collects entropy for salt generation */
U32* hashTable;
U32* hashTable3;
U32* chainTable;
- U32 forceNonContiguous; /* Non-zero if we should force non-contiguous load for the next window update. */
+ int forceNonContiguous; /* Non-zero if we should force non-contiguous load for the next window update. */
int dedicatedDictSearch; /* Indicates whether this matchState is using the
* dedicated dictionary search structure.
*/
optState_t opt; /* optimal parser state */
- const ZSTD_matchState_t* dictMatchState;
+ const ZSTD_MatchState_t* dictMatchState;
ZSTD_compressionParameters cParams;
- const rawSeqStore_t* ldmSeqStore;
+ const RawSeqStore_t* ldmSeqStore;
+
+ /* Controls prefetching in some dictMatchState matchfinders.
+ * This behavior is controlled from the cctx ms.
+ * This parameter has no effect in the cdict ms. */
+ int prefetchCDictTables;
+
+ /* When == 0, lazy match finders insert every position.
+ * When != 0, lazy match finders only insert positions they search.
+ * This allows them to skip much faster over incompressible data,
+ * at a small cost to compression ratio.
+ */
+ int lazySkipping;
};
typedef struct {
ZSTD_compressedBlockState_t* prevCBlock;
ZSTD_compressedBlockState_t* nextCBlock;
- ZSTD_matchState_t matchState;
+ ZSTD_MatchState_t matchState;
} ZSTD_blockState_t;
typedef struct {
@@ -260,7 +342,7 @@ typedef struct {
} ldmState_t;
typedef struct {
- ZSTD_paramSwitch_e enableLdm; /* ZSTD_ps_enable to enable LDM. ZSTD_ps_auto by default */
+ ZSTD_ParamSwitch_e enableLdm; /* ZSTD_ps_enable to enable LDM. ZSTD_ps_auto by default */
U32 hashLog; /* Log size of hashTable */
U32 bucketSizeLog; /* Log bucket size for collision resolution, at most 8 */
U32 minMatchLength; /* Minimum match length */
@@ -291,7 +373,7 @@ struct ZSTD_CCtx_params_s {
* There is no guarantee that hint is close to actual source size */
ZSTD_dictAttachPref_e attachDictPref;
- ZSTD_paramSwitch_e literalCompressionMode;
+ ZSTD_ParamSwitch_e literalCompressionMode;
/* Multithreading: used to pass parameters to mtctx */
int nbWorkers;
@@ -310,24 +392,54 @@ struct ZSTD_CCtx_params_s {
ZSTD_bufferMode_e outBufferMode;
/* Sequence compression API */
- ZSTD_sequenceFormat_e blockDelimiters;
+ ZSTD_SequenceFormat_e blockDelimiters;
int validateSequences;
- /* Block splitting */
- ZSTD_paramSwitch_e useBlockSplitter;
+ /* Block splitting
+ * @postBlockSplitter executes split analysis after sequences are produced;
+ * it's more accurate but consumes more resources.
+ * @preBlockSplitter_level splits before knowing sequences;
+ * it's more approximate but also cheaper.
+ * Valid @preBlockSplitter_level values range from 0 to 6 (included).
+ * 0 means auto, 1 means do not split,
+ * and the remaining levels are ordered by increasing cpu budget, from 2 (fastest) to 6 (slowest).
+ * Highest @preBlockSplitter_level combines well with @postBlockSplitter.
+ */
+ ZSTD_ParamSwitch_e postBlockSplitter;
+ int preBlockSplitter_level;
+
+ /* Adjust the max block size */
+ size_t maxBlockSize;
/* Param for deciding whether to use row-based matchfinder */
- ZSTD_paramSwitch_e useRowMatchFinder;
+ ZSTD_ParamSwitch_e useRowMatchFinder;
/* Always load a dictionary in ext-dict mode (not prefix mode)? */
int deterministicRefPrefix;
/* Internal use, for createCCtxParams() and freeCCtxParams() only */
ZSTD_customMem customMem;
+
+ /* Controls prefetching in some dictMatchState matchfinders */
+ ZSTD_ParamSwitch_e prefetchCDictTables;
+
+ /* Controls whether zstd will fall back to an internal matchfinder
+ * if the external matchfinder returns an error code. */
+ int enableMatchFinderFallback;
+
+ /* Parameters for the external sequence producer API.
+ * Users set these parameters through ZSTD_registerSequenceProducer().
+ * It is not possible to set these parameters individually through the public API. */
+ void* extSeqProdState;
+ ZSTD_sequenceProducer_F extSeqProdFunc;
+
+ /* Controls repcode search in external sequence parsing */
+ ZSTD_ParamSwitch_e searchForExternalRepcodes;
}; /* typedef'd to ZSTD_CCtx_params within "zstd.h" */
#define COMPRESS_SEQUENCES_WORKSPACE_SIZE (sizeof(unsigned) * (MaxSeq + 2))
#define ENTROPY_WORKSPACE_SIZE (HUF_WORKSPACE_SIZE + COMPRESS_SEQUENCES_WORKSPACE_SIZE)
+#define TMP_WORKSPACE_SIZE (MAX(ENTROPY_WORKSPACE_SIZE, ZSTD_SLIPBLOCK_WORKSPACESIZE))
/*
* Indicates whether this compression proceeds directly from user-provided
@@ -345,11 +457,11 @@ typedef enum {
*/
#define ZSTD_MAX_NB_BLOCK_SPLITS 196
typedef struct {
- seqStore_t fullSeqStoreChunk;
- seqStore_t firstHalfSeqStore;
- seqStore_t secondHalfSeqStore;
- seqStore_t currSeqStore;
- seqStore_t nextSeqStore;
+ SeqStore_t fullSeqStoreChunk;
+ SeqStore_t firstHalfSeqStore;
+ SeqStore_t secondHalfSeqStore;
+ SeqStore_t currSeqStore;
+ SeqStore_t nextSeqStore;
U32 partitions[ZSTD_MAX_NB_BLOCK_SPLITS];
ZSTD_entropyCTablesMetadata_t entropyMetadata;
@@ -366,7 +478,7 @@ struct ZSTD_CCtx_s {
size_t dictContentSize;
ZSTD_cwksp workspace; /* manages buffer for dynamic allocations */
- size_t blockSize;
+ size_t blockSizeMax;
unsigned long long pledgedSrcSizePlusOne; /* this way, 0 (default) == unknown */
unsigned long long consumedSrcSize;
unsigned long long producedCSize;
@@ -378,13 +490,14 @@ struct ZSTD_CCtx_s {
int isFirstBlock;
int initialized;
- seqStore_t seqStore; /* sequences storage ptrs */
+ SeqStore_t seqStore; /* sequences storage ptrs */
ldmState_t ldmState; /* long distance matching state */
rawSeq* ldmSequences; /* Storage for the ldm output sequences */
size_t maxNbLdmSequences;
- rawSeqStore_t externSeqStore; /* Mutable reference to external sequences */
+ RawSeqStore_t externSeqStore; /* Mutable reference to external sequences */
ZSTD_blockState_t blockState;
- U32* entropyWorkspace; /* entropy workspace of ENTROPY_WORKSPACE_SIZE bytes */
+ void* tmpWorkspace; /* used as substitute of stack space - must be aligned for S64 type */
+ size_t tmpWkspSize;
/* Whether we are streaming or not */
ZSTD_buffered_policy_e bufferedPolicy;
@@ -404,6 +517,7 @@ struct ZSTD_CCtx_s {
/* Stable in/out buffer verification */
ZSTD_inBuffer expectedInBuffer;
+ size_t stableIn_notConsumed; /* nb bytes within stable input buffer that are said to be consumed but are not */
size_t expectedOutBufferSize;
/* Dictionary */
@@ -417,9 +531,14 @@ struct ZSTD_CCtx_s {
/* Workspace for block splitter */
ZSTD_blockSplitCtx blockSplitCtx;
+
+ /* Buffer for output from external sequence producer */
+ ZSTD_Sequence* extSeqBuf;
+ size_t extSeqBufCapacity;
};
typedef enum { ZSTD_dtlm_fast, ZSTD_dtlm_full } ZSTD_dictTableLoadMethod_e;
+typedef enum { ZSTD_tfp_forCCtx, ZSTD_tfp_forCDict } ZSTD_tableFillPurpose_e;
typedef enum {
ZSTD_noDict = 0,
@@ -441,17 +560,17 @@ typedef enum {
* In this mode we take both the source size and the dictionary size
* into account when selecting and adjusting the parameters.
*/
- ZSTD_cpm_unknown = 3, /* ZSTD_getCParams, ZSTD_getParams, ZSTD_adjustParams.
+ ZSTD_cpm_unknown = 3 /* ZSTD_getCParams, ZSTD_getParams, ZSTD_adjustParams.
* We don't know what these parameters are for. We default to the legacy
* behavior of taking both the source size and the dict size into account
* when selecting and adjusting parameters.
*/
-} ZSTD_cParamMode_e;
+} ZSTD_CParamMode_e;
-typedef size_t (*ZSTD_blockCompressor) (
- ZSTD_matchState_t* bs, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+typedef size_t (*ZSTD_BlockCompressor_f) (
+ ZSTD_MatchState_t* bs, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramSwitch_e rowMatchfinderMode, ZSTD_dictMode_e dictMode);
+ZSTD_BlockCompressor_f ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_ParamSwitch_e rowMatchfinderMode, ZSTD_dictMode_e dictMode);
MEM_STATIC U32 ZSTD_LLcode(U32 litLength)
@@ -497,12 +616,33 @@ MEM_STATIC int ZSTD_cParam_withinBounds(ZSTD_cParameter cParam, int value)
return 1;
}
+/* ZSTD_selectAddr:
+ * @return index >= lowLimit ? candidate : backup,
+ * tries to force branchless codegen. */
+MEM_STATIC const BYTE*
+ZSTD_selectAddr(U32 index, U32 lowLimit, const BYTE* candidate, const BYTE* backup)
+{
+#if defined(__x86_64__)
+ __asm__ (
+ "cmp %1, %2\n"
+ "cmova %3, %0\n"
+ : "+r"(candidate)
+ : "r"(index), "r"(lowLimit), "r"(backup)
+ );
+ return candidate;
+#else
+ return index >= lowLimit ? candidate : backup;
+#endif
+}
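A hypothetical call site (not from the patch; names invented) showing the intended use of the branchless helper above, picking between the current-segment pointer and an extDict backup:

/* Sketch only. */
static const BYTE* pick_match(U32 matchIndex, U32 lowLimit,
                              const BYTE* base, const BYTE* dictBase)
{
    return ZSTD_selectAddr(matchIndex, lowLimit,
                           base + matchIndex,       /* candidate: current segment */
                           dictBase + matchIndex);  /* backup: extDict segment */
}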
+
/* ZSTD_noCompressBlock() :
* Writes uncompressed block to dst buffer from given src.
* Returns the size of the block */
-MEM_STATIC size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastBlock)
+MEM_STATIC size_t
+ZSTD_noCompressBlock(void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastBlock)
{
U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(srcSize << 3);
+ DEBUGLOG(5, "ZSTD_noCompressBlock (srcSize=%zu, dstCapacity=%zu)", srcSize, dstCapacity);
RETURN_ERROR_IF(srcSize + ZSTD_blockHeaderSize > dstCapacity,
dstSize_tooSmall, "dst buf too small for uncompressed block");
MEM_writeLE24(dst, cBlockHeader24);
@@ -510,7 +650,8 @@ MEM_STATIC size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const voi
return ZSTD_blockHeaderSize + srcSize;
}
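A worked example of the 24-bit block header computed above (illustration only):

/* A last, raw block of 100 bytes, with bt_raw == 0:
 *   cBlockHeader24 = 1 + (0 << 1) + (100 << 3) = 801 = 0x000321
 * MEM_writeLE24() emits the bytes 0x21 0x03 0x00, followed by the 100
 * source bytes, for a total of ZSTD_blockHeaderSize + 100. */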
-MEM_STATIC size_t ZSTD_rleCompressBlock (void* dst, size_t dstCapacity, BYTE src, size_t srcSize, U32 lastBlock)
+MEM_STATIC size_t
+ZSTD_rleCompressBlock(void* dst, size_t dstCapacity, BYTE src, size_t srcSize, U32 lastBlock)
{
BYTE* const op = (BYTE*)dst;
U32 const cBlockHeader = lastBlock + (((U32)bt_rle)<<1) + (U32)(srcSize << 3);
@@ -529,7 +670,7 @@ MEM_STATIC size_t ZSTD_minGain(size_t srcSize, ZSTD_strategy strat)
{
U32 const minlog = (strat>=ZSTD_btultra) ? (U32)(strat) - 1 : 6;
ZSTD_STATIC_ASSERT(ZSTD_btultra == 8);
- assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
+ assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, (int)strat));
return (srcSize >> minlog) + 2;
}
@@ -565,29 +706,68 @@ ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const iend, BYTE con
while (ip < iend) *op++ = *ip++;
}
-#define ZSTD_REP_MOVE (ZSTD_REP_NUM-1)
-#define STORE_REPCODE_1 STORE_REPCODE(1)
-#define STORE_REPCODE_2 STORE_REPCODE(2)
-#define STORE_REPCODE_3 STORE_REPCODE(3)
-#define STORE_REPCODE(r) (assert((r)>=1), assert((r)<=3), (r)-1)
-#define STORE_OFFSET(o) (assert((o)>0), o + ZSTD_REP_MOVE)
-#define STORED_IS_OFFSET(o) ((o) > ZSTD_REP_MOVE)
-#define STORED_IS_REPCODE(o) ((o) <= ZSTD_REP_MOVE)
-#define STORED_OFFSET(o) (assert(STORED_IS_OFFSET(o)), (o)-ZSTD_REP_MOVE)
-#define STORED_REPCODE(o) (assert(STORED_IS_REPCODE(o)), (o)+1) /* returns ID 1,2,3 */
-#define STORED_TO_OFFBASE(o) ((o)+1)
-#define OFFBASE_TO_STORED(o) ((o)-1)
+
+#define REPCODE1_TO_OFFBASE REPCODE_TO_OFFBASE(1)
+#define REPCODE2_TO_OFFBASE REPCODE_TO_OFFBASE(2)
+#define REPCODE3_TO_OFFBASE REPCODE_TO_OFFBASE(3)
+#define REPCODE_TO_OFFBASE(r) (assert((r)>=1), assert((r)<=ZSTD_REP_NUM), (r)) /* accepts IDs 1,2,3 */
+#define OFFSET_TO_OFFBASE(o) (assert((o)>0), o + ZSTD_REP_NUM)
+#define OFFBASE_IS_OFFSET(o) ((o) > ZSTD_REP_NUM)
+#define OFFBASE_IS_REPCODE(o) ( 1 <= (o) && (o) <= ZSTD_REP_NUM)
+#define OFFBASE_TO_OFFSET(o) (assert(OFFBASE_IS_OFFSET(o)), (o) - ZSTD_REP_NUM)
+#define OFFBASE_TO_REPCODE(o) (assert(OFFBASE_IS_REPCODE(o)), (o)) /* returns ID 1,2,3 */
+
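A worked example of the offBase encoding defined by the macros above (ZSTD_REP_NUM == 3):

/*   real offset 17 -> OFFSET_TO_OFFBASE(17) == 20, OFFBASE_IS_OFFSET(20) holds,
 *                     and OFFBASE_TO_OFFSET(20) == 17;
 *   repcode 2      -> REPCODE_TO_OFFBASE(2) == 2, OFFBASE_IS_REPCODE(2) holds,
 *                     and OFFBASE_TO_REPCODE(2) == 2.
 * offBase == 0 is never produced, so the two ranges cannot collide. */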
+/*! ZSTD_storeSeqOnly() :
+ * Store a sequence (litlen, litPtr, offBase and matchLength) into SeqStore_t.
+ * Literals themselves are not copied, but @litPtr is updated.
+ * @offBase : Users should employ macros REPCODE_TO_OFFBASE() and OFFSET_TO_OFFBASE().
+ * @matchLength : must be >= MINMATCH
+*/
+HINT_INLINE UNUSED_ATTR void
+ZSTD_storeSeqOnly(SeqStore_t* seqStorePtr,
+ size_t litLength,
+ U32 offBase,
+ size_t matchLength)
+{
+ assert((size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq);
+
+ /* literal Length */
+ assert(litLength <= ZSTD_BLOCKSIZE_MAX);
+ if (UNLIKELY(litLength>0xFFFF)) {
+ assert(seqStorePtr->longLengthType == ZSTD_llt_none); /* there can only be a single long length */
+ seqStorePtr->longLengthType = ZSTD_llt_literalLength;
+ seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
+ }
+ seqStorePtr->sequences[0].litLength = (U16)litLength;
+
+ /* match offset */
+ seqStorePtr->sequences[0].offBase = offBase;
+
+ /* match Length */
+ assert(matchLength <= ZSTD_BLOCKSIZE_MAX);
+ assert(matchLength >= MINMATCH);
+ { size_t const mlBase = matchLength - MINMATCH;
+ if (UNLIKELY(mlBase>0xFFFF)) {
+ assert(seqStorePtr->longLengthType == ZSTD_llt_none); /* there can only be a single long length */
+ seqStorePtr->longLengthType = ZSTD_llt_matchLength;
+ seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
+ }
+ seqStorePtr->sequences[0].mlBase = (U16)mlBase;
+ }
+
+ seqStorePtr->sequences++;
+}
/*! ZSTD_storeSeq() :
- * Store a sequence (litlen, litPtr, offCode and matchLength) into seqStore_t.
- * @offBase_minus1 : Users should use employ macros STORE_REPCODE_X and STORE_OFFSET().
+ * Store a sequence (litlen, litPtr, offBase and matchLength) into SeqStore_t.
+ * @offBase : Users should employ macros REPCODE_TO_OFFBASE() and OFFSET_TO_OFFBASE().
* @matchLength : must be >= MINMATCH
- * Allowed to overread literals up to litLimit.
+ * Allowed to over-read literals up to litLimit.
*/
HINT_INLINE UNUSED_ATTR void
-ZSTD_storeSeq(seqStore_t* seqStorePtr,
+ZSTD_storeSeq(SeqStore_t* seqStorePtr,
size_t litLength, const BYTE* literals, const BYTE* litLimit,
- U32 offBase_minus1,
+ U32 offBase,
size_t matchLength)
{
BYTE const* const litLimit_w = litLimit - WILDCOPY_OVERLENGTH;
@@ -596,8 +776,8 @@ ZSTD_storeSeq(seqStore_t* seqStorePtr,
static const BYTE* g_start = NULL;
if (g_start==NULL) g_start = (const BYTE*)literals; /* note : index only works for compression within a single segment */
{ U32 const pos = (U32)((const BYTE*)literals - g_start);
- DEBUGLOG(6, "Cpos%7u :%3u literals, match%4u bytes at offCode%7u",
- pos, (U32)litLength, (U32)matchLength, (U32)offBase_minus1);
+ DEBUGLOG(6, "Cpos%7u :%3u literals, match%4u bytes at offBase%7u",
+ pos, (U32)litLength, (U32)matchLength, (U32)offBase);
}
#endif
assert((size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq);
@@ -607,9 +787,9 @@ ZSTD_storeSeq(seqStore_t* seqStorePtr,
assert(literals + litLength <= litLimit);
if (litEnd <= litLimit_w) {
/* Common case we can use wildcopy.
- * First copy 16 bytes, because literals are likely short.
- */
- assert(WILDCOPY_OVERLENGTH >= 16);
+ * First copy 16 bytes, because literals are likely short.
+ */
+ ZSTD_STATIC_ASSERT(WILDCOPY_OVERLENGTH >= 16);
ZSTD_copy16(seqStorePtr->lit, literals);
if (litLength > 16) {
ZSTD_wildcopy(seqStorePtr->lit+16, literals+16, (ptrdiff_t)litLength-16, ZSTD_no_overlap);
@@ -619,44 +799,22 @@ ZSTD_storeSeq(seqStore_t* seqStorePtr,
}
seqStorePtr->lit += litLength;
- /* literal Length */
- if (litLength>0xFFFF) {
- assert(seqStorePtr->longLengthType == ZSTD_llt_none); /* there can only be a single long length */
- seqStorePtr->longLengthType = ZSTD_llt_literalLength;
- seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
- }
- seqStorePtr->sequences[0].litLength = (U16)litLength;
-
- /* match offset */
- seqStorePtr->sequences[0].offBase = STORED_TO_OFFBASE(offBase_minus1);
-
- /* match Length */
- assert(matchLength >= MINMATCH);
- { size_t const mlBase = matchLength - MINMATCH;
- if (mlBase>0xFFFF) {
- assert(seqStorePtr->longLengthType == ZSTD_llt_none); /* there can only be a single long length */
- seqStorePtr->longLengthType = ZSTD_llt_matchLength;
- seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
- }
- seqStorePtr->sequences[0].mlBase = (U16)mlBase;
- }
-
- seqStorePtr->sequences++;
+ ZSTD_storeSeqOnly(seqStorePtr, litLength, offBase, matchLength);
}
/* ZSTD_updateRep() :
* updates in-place @rep (array of repeat offsets)
- * @offBase_minus1 : sum-type, with same numeric representation as ZSTD_storeSeq()
+ * @offBase : sum-type, using numeric representation of ZSTD_storeSeq()
*/
MEM_STATIC void
-ZSTD_updateRep(U32 rep[ZSTD_REP_NUM], U32 const offBase_minus1, U32 const ll0)
+ZSTD_updateRep(U32 rep[ZSTD_REP_NUM], U32 const offBase, U32 const ll0)
{
- if (STORED_IS_OFFSET(offBase_minus1)) { /* full offset */
+ if (OFFBASE_IS_OFFSET(offBase)) { /* full offset */
rep[2] = rep[1];
rep[1] = rep[0];
- rep[0] = STORED_OFFSET(offBase_minus1);
+ rep[0] = OFFBASE_TO_OFFSET(offBase);
} else { /* repcode */
- U32 const repCode = STORED_REPCODE(offBase_minus1) - 1 + ll0;
+ U32 const repCode = OFFBASE_TO_REPCODE(offBase) - 1 + ll0;
if (repCode > 0) { /* note : if repCode==0, no change */
U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
rep[2] = (repCode >= 2) ? rep[1] : rep[2];
@@ -670,14 +828,14 @@ ZSTD_updateRep(U32 rep[ZSTD_REP_NUM], U32 const offBase_minus1, U32 const ll0)
typedef struct repcodes_s {
U32 rep[3];
-} repcodes_t;
+} Repcodes_t;
-MEM_STATIC repcodes_t
-ZSTD_newRep(U32 const rep[ZSTD_REP_NUM], U32 const offBase_minus1, U32 const ll0)
+MEM_STATIC Repcodes_t
+ZSTD_newRep(U32 const rep[ZSTD_REP_NUM], U32 const offBase, U32 const ll0)
{
- repcodes_t newReps;
+ Repcodes_t newReps;
ZSTD_memcpy(&newReps, rep, sizeof(newReps));
- ZSTD_updateRep(newReps.rep, offBase_minus1, ll0);
+ ZSTD_updateRep(newReps.rep, offBase, ll0);
return newReps;
}
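A worked example of the repcode-history update above, re-implemented as a standalone mirror of ZSTD_updateRep(); the offBase encoding follows the macros earlier in this file, and the starting history values are invented:

#include <assert.h>

typedef unsigned U32;
#define REP_NUM 3

/* same update rule as ZSTD_updateRep(): offBase > REP_NUM is a real offset, otherwise a repcode ID */
static void updateRep(U32 rep[REP_NUM], U32 offBase, U32 ll0)
{
    if (offBase > REP_NUM) {                  /* full offset: push history */
        rep[2] = rep[1]; rep[1] = rep[0];
        rep[0] = offBase - REP_NUM;
    } else {                                  /* repcode */
        U32 const repCode = offBase - 1 + ll0;
        if (repCode > 0) {                    /* repCode==0 : no change */
            U32 const cur = (repCode == REP_NUM) ? (rep[0] - 1) : rep[repCode];
            rep[2] = (repCode >= 2) ? rep[1] : rep[2];
            rep[1] = rep[0];
            rep[0] = cur;
        }
    }
}

int main(void)
{
    U32 rep[REP_NUM] = { 8, 4, 2 };
    updateRep(rep, REP_NUM + 100, 0);         /* real offset 100 enters the history */
    assert(rep[0] == 100 && rep[1] == 8 && rep[2] == 4);
    updateRep(rep, 2, 0);                     /* repcode 2 moves rep[1] to the front */
    assert(rep[0] == 8 && rep[1] == 100 && rep[2] == 4);
    return 0;
}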
@@ -685,59 +843,6 @@ ZSTD_newRep(U32 const rep[ZSTD_REP_NUM], U32 const offBase_minus1, U32 const ll0
/*-*************************************
* Match length counter
***************************************/
-static unsigned ZSTD_NbCommonBytes (size_t val)
-{
- if (MEM_isLittleEndian()) {
- if (MEM_64bits()) {
-# if (__GNUC__ >= 4)
- return (__builtin_ctzll((U64)val) >> 3);
-# else
- static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2,
- 0, 3, 1, 3, 1, 4, 2, 7,
- 0, 2, 3, 6, 1, 5, 3, 5,
- 1, 3, 4, 4, 2, 5, 6, 7,
- 7, 0, 1, 2, 3, 3, 4, 6,
- 2, 6, 5, 5, 3, 4, 5, 6,
- 7, 1, 2, 4, 6, 4, 4, 5,
- 7, 2, 6, 5, 7, 6, 7, 7 };
- return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
-# endif
- } else { /* 32 bits */
-# if (__GNUC__ >= 3)
- return (__builtin_ctz((U32)val) >> 3);
-# else
- static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0,
- 3, 2, 2, 1, 3, 2, 0, 1,
- 3, 3, 1, 2, 2, 2, 2, 0,
- 3, 1, 2, 0, 1, 0, 1, 1 };
- return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
-# endif
- }
- } else { /* Big Endian CPU */
- if (MEM_64bits()) {
-# if (__GNUC__ >= 4)
- return (__builtin_clzll(val) >> 3);
-# else
- unsigned r;
- const unsigned n32 = sizeof(size_t)*4; /* calculate this way due to compiler complaining in 32-bits mode */
- if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; }
- if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
- r += (!val);
- return r;
-# endif
- } else { /* 32 bits */
-# if (__GNUC__ >= 3)
- return (__builtin_clz((U32)val) >> 3);
-# else
- unsigned r;
- if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
- r += (!val);
- return r;
-# endif
- } }
-}
-
-
MEM_STATIC size_t ZSTD_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* const pInLimit)
{
const BYTE* const pStart = pIn;
@@ -771,8 +876,8 @@ ZSTD_count_2segments(const BYTE* ip, const BYTE* match,
size_t const matchLength = ZSTD_count(ip, match, vEnd);
if (match + matchLength != mEnd) return matchLength;
DEBUGLOG(7, "ZSTD_count_2segments: found a 2-parts match (current length==%zu)", matchLength);
- DEBUGLOG(7, "distance from match beginning to end dictionary = %zi", mEnd - match);
- DEBUGLOG(7, "distance from current pos to end buffer = %zi", iEnd - ip);
+ DEBUGLOG(7, "distance from match beginning to end dictionary = %i", (int)(mEnd - match));
+ DEBUGLOG(7, "distance from current pos to end buffer = %i", (int)(iEnd - ip));
DEBUGLOG(7, "next byte : ip==%02X, istart==%02X", ip[matchLength], *iStart);
DEBUGLOG(7, "final match length = %zu", matchLength + ZSTD_count(ip+matchLength, iStart, iEnd));
return matchLength + ZSTD_count(ip+matchLength, iStart, iEnd);
@@ -783,32 +888,43 @@ ZSTD_count_2segments(const BYTE* ip, const BYTE* match,
* Hashes
***************************************/
static const U32 prime3bytes = 506832829U;
-static U32 ZSTD_hash3(U32 u, U32 h) { return ((u << (32-24)) * prime3bytes) >> (32-h) ; }
-MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h); } /* only in zstd_opt.h */
+static U32 ZSTD_hash3(U32 u, U32 h, U32 s) { assert(h <= 32); return (((u << (32-24)) * prime3bytes) ^ s) >> (32-h) ; }
+MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h, 0); } /* only in zstd_opt.h */
+MEM_STATIC size_t ZSTD_hash3PtrS(const void* ptr, U32 h, U32 s) { return ZSTD_hash3(MEM_readLE32(ptr), h, s); }
static const U32 prime4bytes = 2654435761U;
-static U32 ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32-h) ; }
-static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_read32(ptr), h); }
+static U32 ZSTD_hash4(U32 u, U32 h, U32 s) { assert(h <= 32); return ((u * prime4bytes) ^ s) >> (32-h) ; }
+static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_readLE32(ptr), h, 0); }
+static size_t ZSTD_hash4PtrS(const void* ptr, U32 h, U32 s) { return ZSTD_hash4(MEM_readLE32(ptr), h, s); }
static const U64 prime5bytes = 889523592379ULL;
-static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u << (64-40)) * prime5bytes) >> (64-h)) ; }
-static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h); }
+static size_t ZSTD_hash5(U64 u, U32 h, U64 s) { assert(h <= 64); return (size_t)((((u << (64-40)) * prime5bytes) ^ s) >> (64-h)) ; }
+static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h, 0); }
+static size_t ZSTD_hash5PtrS(const void* p, U32 h, U64 s) { return ZSTD_hash5(MEM_readLE64(p), h, s); }
static const U64 prime6bytes = 227718039650203ULL;
-static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u << (64-48)) * prime6bytes) >> (64-h)) ; }
-static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); }
+static size_t ZSTD_hash6(U64 u, U32 h, U64 s) { assert(h <= 64); return (size_t)((((u << (64-48)) * prime6bytes) ^ s) >> (64-h)) ; }
+static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h, 0); }
+static size_t ZSTD_hash6PtrS(const void* p, U32 h, U64 s) { return ZSTD_hash6(MEM_readLE64(p), h, s); }
static const U64 prime7bytes = 58295818150454627ULL;
-static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u << (64-56)) * prime7bytes) >> (64-h)) ; }
-static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h); }
+static size_t ZSTD_hash7(U64 u, U32 h, U64 s) { assert(h <= 64); return (size_t)((((u << (64-56)) * prime7bytes) ^ s) >> (64-h)) ; }
+static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h, 0); }
+static size_t ZSTD_hash7PtrS(const void* p, U32 h, U64 s) { return ZSTD_hash7(MEM_readLE64(p), h, s); }
static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;
-static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; }
-static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); }
+static size_t ZSTD_hash8(U64 u, U32 h, U64 s) { assert(h <= 64); return (size_t)((((u) * prime8bytes) ^ s) >> (64-h)) ; }
+static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h, 0); }
+static size_t ZSTD_hash8PtrS(const void* p, U32 h, U64 s) { return ZSTD_hash8(MEM_readLE64(p), h, s); }
+
MEM_STATIC FORCE_INLINE_ATTR
size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls)
{
+ /* Although some of these hashes do support hBits up to 64, some do not.
+ * To be on the safe side, always avoid hBits > 32. */
+ assert(hBits <= 32);
+
switch(mls)
{
default:
@@ -820,6 +936,24 @@ size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls)
}
}
+MEM_STATIC FORCE_INLINE_ATTR
+size_t ZSTD_hashPtrSalted(const void* p, U32 hBits, U32 mls, const U64 hashSalt) {
+ /* Although some of these hashes do support hBits up to 64, some do not.
+ * To be on the safe side, always avoid hBits > 32. */
+ assert(hBits <= 32);
+
+ switch(mls)
+ {
+ default:
+ case 4: return ZSTD_hash4PtrS(p, hBits, (U32)hashSalt);
+ case 5: return ZSTD_hash5PtrS(p, hBits, hashSalt);
+ case 6: return ZSTD_hash6PtrS(p, hBits, hashSalt);
+ case 7: return ZSTD_hash7PtrS(p, hBits, hashSalt);
+ case 8: return ZSTD_hash8PtrS(p, hBits, hashSalt);
+ }
+}
+
+
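A standalone sketch of the salted 4-byte hash above (same arithmetic, with prime4bytes copied from this file): the salt is XORed in before keeping the top hBits, so the same input lands in different buckets for different salts. The salt value below is arbitrary.

#include <stdint.h>
#include <stdio.h>

static uint32_t const prime4bytes = 2654435761U;

/* mirrors ZSTD_hash4(): multiply by a prime, XOR an optional salt, keep the top hBits */
static uint32_t hash4(uint32_t u, uint32_t hBits, uint32_t salt)
{
    return ((u * prime4bytes) ^ salt) >> (32 - hBits);
}

int main(void)
{
    uint32_t const word = 0x64636261U;   /* "abcd" read little-endian */
    printf("no salt : bucket %u\n", hash4(word, 17, 0));
    printf("salted  : bucket %u\n", hash4(word, 17, 0x9E3779B1U));   /* arbitrary salt */
    return 0;
}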
/* ZSTD_ipow() :
* Return base^exponent.
*/
@@ -881,11 +1015,12 @@ MEM_STATIC U64 ZSTD_rollingHash_rotate(U64 hash, BYTE toRemove, BYTE toAdd, U64
/*-*************************************
* Round buffer management
***************************************/
-#if (ZSTD_WINDOWLOG_MAX_64 > 31)
-# error "ZSTD_WINDOWLOG_MAX is too large : would overflow ZSTD_CURRENT_MAX"
-#endif
-/* Max current allowed */
-#define ZSTD_CURRENT_MAX ((3U << 29) + (1U << ZSTD_WINDOWLOG_MAX))
+/* Max @current value allowed:
+ * In 32-bit mode: we want to avoid crossing the 2 GB limit,
+ * reducing risks of side effects in case of signed operations on indexes.
+ * In 64-bit mode: we want to ensure that adding the maximum job size (512 MB)
+ * doesn't overflow U32 index capacity (4 GB) */
+#define ZSTD_CURRENT_MAX (MEM_64bits() ? 3500U MB : 2000U MB)
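A quick check of those bounds, assuming the MB macro is 2^20 bytes as elsewhere in zstd: 3500 MB + 512 MB = 4012 MB, comfortably below the 4096 MB that a U32 index can address, and 2000 MB stays below the 2048 MB signed 32-bit boundary.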
/* Maximum chunk size before overflow correction needs to be called again */
#define ZSTD_CHUNKSIZE_MAX \
( ((U32)-1) /* Maximum ending current index */ \
@@ -925,7 +1060,7 @@ MEM_STATIC U32 ZSTD_window_hasExtDict(ZSTD_window_t const window)
* Inspects the provided matchState and figures out what dictMode should be
* passed to the compressor.
*/
-MEM_STATIC ZSTD_dictMode_e ZSTD_matchState_dictMode(const ZSTD_matchState_t *ms)
+MEM_STATIC ZSTD_dictMode_e ZSTD_matchState_dictMode(const ZSTD_MatchState_t *ms)
{
return ZSTD_window_hasExtDict(ms->window) ?
ZSTD_extDict :
@@ -1011,7 +1146,9 @@ MEM_STATIC U32 ZSTD_window_needOverflowCorrection(ZSTD_window_t const window,
* The least significant cycleLog bits of the indices must remain the same,
* which may be 0. Every index up to maxDist in the past must be valid.
*/
-MEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog,
+MEM_STATIC
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog,
U32 maxDist, void const* src)
{
/* preemptive overflow correction:
@@ -1112,7 +1249,7 @@ ZSTD_window_enforceMaxDist(ZSTD_window_t* window,
const void* blockEnd,
U32 maxDist,
U32* loadedDictEndPtr,
- const ZSTD_matchState_t** dictMatchStatePtr)
+ const ZSTD_MatchState_t** dictMatchStatePtr)
{
U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base);
U32 const loadedDictEnd = (loadedDictEndPtr != NULL) ? *loadedDictEndPtr : 0;
@@ -1157,7 +1294,7 @@ ZSTD_checkDictValidity(const ZSTD_window_t* window,
const void* blockEnd,
U32 maxDist,
U32* loadedDictEndPtr,
- const ZSTD_matchState_t** dictMatchStatePtr)
+ const ZSTD_MatchState_t** dictMatchStatePtr)
{
assert(loadedDictEndPtr != NULL);
assert(dictMatchStatePtr != NULL);
@@ -1167,10 +1304,15 @@ ZSTD_checkDictValidity(const ZSTD_window_t* window,
(unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd);
assert(blockEndIdx >= loadedDictEnd);
- if (blockEndIdx > loadedDictEnd + maxDist) {
+ if (blockEndIdx > loadedDictEnd + maxDist || loadedDictEnd != window->dictLimit) {
/* On reaching window size, dictionaries are invalidated.
* For simplification, if window size is reached anywhere within next block,
* the dictionary is invalidated for the full block.
+ *
+ * We also have to invalidate the dictionary if ZSTD_window_update() has detected
+ * non-contiguous segments, which means that loadedDictEnd != window->dictLimit.
+ * loadedDictEnd may be 0, if forceWindow is true, but in that case we never use
+ * dictMatchState, so setting it to NULL is not a problem.
*/
DEBUGLOG(6, "invalidating dictionary for current block (distance > windowSize)");
*loadedDictEndPtr = 0;
@@ -1199,9 +1341,11 @@ MEM_STATIC void ZSTD_window_init(ZSTD_window_t* window) {
* forget about the extDict. Handles overlap of the prefix and extDict.
* Returns non-zero if the segment is contiguous.
*/
-MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window,
- void const* src, size_t srcSize,
- int forceNonContiguous)
+MEM_STATIC
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+U32 ZSTD_window_update(ZSTD_window_t* window,
+ const void* src, size_t srcSize,
+ int forceNonContiguous)
{
BYTE const* const ip = (BYTE const*)src;
U32 contiguous = 1;
@@ -1228,8 +1372,9 @@ MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window,
/* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */
if ( (ip+srcSize > window->dictBase + window->lowLimit)
& (ip < window->dictBase + window->dictLimit)) {
- ptrdiff_t const highInputIdx = (ip + srcSize) - window->dictBase;
- U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)window->dictLimit) ? window->dictLimit : (U32)highInputIdx;
+ size_t const highInputIdx = (size_t)((ip + srcSize) - window->dictBase);
+ U32 const lowLimitMax = (highInputIdx > (size_t)window->dictLimit) ? window->dictLimit : (U32)highInputIdx;
+ assert(highInputIdx < UINT_MAX);
window->lowLimit = lowLimitMax;
DEBUGLOG(5, "Overlapping extDict and input : new lowLimit = %u", window->lowLimit);
}
@@ -1239,7 +1384,7 @@ MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window,
/*
* Returns the lowest allowed match index. It may either be in the ext-dict or the prefix.
*/
-MEM_STATIC U32 ZSTD_getLowestMatchIndex(const ZSTD_matchState_t* ms, U32 curr, unsigned windowLog)
+MEM_STATIC U32 ZSTD_getLowestMatchIndex(const ZSTD_MatchState_t* ms, U32 curr, unsigned windowLog)
{
U32 const maxDistance = 1U << windowLog;
U32 const lowestValid = ms->window.lowLimit;
@@ -1256,7 +1401,7 @@ MEM_STATIC U32 ZSTD_getLowestMatchIndex(const ZSTD_matchState_t* ms, U32 curr, u
/*
* Returns the lowest allowed match index in the prefix.
*/
-MEM_STATIC U32 ZSTD_getLowestPrefixIndex(const ZSTD_matchState_t* ms, U32 curr, unsigned windowLog)
+MEM_STATIC U32 ZSTD_getLowestPrefixIndex(const ZSTD_MatchState_t* ms, U32 curr, unsigned windowLog)
{
U32 const maxDistance = 1U << windowLog;
U32 const lowestValid = ms->window.dictLimit;
@@ -1269,6 +1414,13 @@ MEM_STATIC U32 ZSTD_getLowestPrefixIndex(const ZSTD_matchState_t* ms, U32 curr,
return matchLowest;
}
+/* ZSTD_index_overlap_check:
+ * intentional underflow : ensure repIndex isn't overlapping dict + prefix
+ * @return 1 if the values are not overlapping,
+ * 0 otherwise */
+MEM_STATIC int ZSTD_index_overlap_check(const U32 prefixLowestIndex, const U32 repIndex) {
+ return ((U32)((prefixLowestIndex-1) - repIndex) >= 3);
+}
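The check relies on unsigned wrap-around: when repIndex sits above prefixLowestIndex the subtraction wraps to a huge value and the test still passes. A standalone illustration with invented index values:

#include <assert.h>
#include <stdint.h>

typedef uint32_t U32;

/* same expression as ZSTD_index_overlap_check() */
static int overlapOk(U32 prefixLowestIndex, U32 repIndex)
{
    return (U32)((prefixLowestIndex - 1) - repIndex) >= 3;
}

int main(void)
{
    assert(overlapOk(1000,  500) == 1);   /* repIndex well inside the ext-dict */
    assert(overlapOk(1000,  998) == 0);   /* too close to the dict/prefix boundary */
    assert(overlapOk(1000, 1200) == 1);   /* repIndex in the prefix: wraps around, still >= 3 */
    return 0;
}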
/* debug functions */
@@ -1302,7 +1454,42 @@ MEM_STATIC void ZSTD_debugTable(const U32* table, U32 max)
#endif
+/* Short Cache */
+
+/* Normally, zstd matchfinders follow this flow:
+ * 1. Compute hash at ip
+ * 2. Load index from hashTable[hash]
+ * 3. Check if *ip == *(base + index)
+ * In dictionary compression, loading *(base + index) is often an L2 or even L3 miss.
+ *
+ * Short cache is an optimization which allows us to avoid step 3 most of the time
+ * when the data doesn't actually match. With short cache, the flow becomes:
+ * 1. Compute (hash, currentTag) at ip. currentTag is an 8-bit independent hash at ip.
+ * 2. Load (index, matchTag) from hashTable[hash]. See ZSTD_writeTaggedIndex to understand how this works.
+ * 3. Only if currentTag == matchTag, check *ip == *(base + index). Otherwise, continue.
+ *
+ * Currently, short cache is only implemented in CDict hashtables. Thus, its use is limited to
+ * dictMatchState matchfinders.
+ */
+#define ZSTD_SHORT_CACHE_TAG_BITS 8
+#define ZSTD_SHORT_CACHE_TAG_MASK ((1u << ZSTD_SHORT_CACHE_TAG_BITS) - 1)
+
+/* Helper function for ZSTD_fillHashTable and ZSTD_fillDoubleHashTable.
+ * Unpacks hashAndTag into (hash, tag), then packs (index, tag) into hashTable[hash]. */
+MEM_STATIC void ZSTD_writeTaggedIndex(U32* const hashTable, size_t hashAndTag, U32 index) {
+ size_t const hash = hashAndTag >> ZSTD_SHORT_CACHE_TAG_BITS;
+ U32 const tag = (U32)(hashAndTag & ZSTD_SHORT_CACHE_TAG_MASK);
+ assert(index >> (32 - ZSTD_SHORT_CACHE_TAG_BITS) == 0);
+ hashTable[hash] = (index << ZSTD_SHORT_CACHE_TAG_BITS) | tag;
+}
+/* Helper function for short cache matchfinders.
+ * Unpacks tag1 and tag2 from lower bits of packedTag1 and packedTag2, then checks if the tags match. */
+MEM_STATIC int ZSTD_comparePackedTags(size_t packedTag1, size_t packedTag2) {
+ U32 const tag1 = packedTag1 & ZSTD_SHORT_CACHE_TAG_MASK;
+ U32 const tag2 = packedTag2 & ZSTD_SHORT_CACHE_TAG_MASK;
+ return tag1 == tag2;
+}
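For illustration, a standalone sketch of both sides of the packing described above; in the real matchfinders the hashAndTag value comes from ZSTD_hashPtrSalted(), whose low 8 bits serve as the tag, but here it is simply made up:

#include <assert.h>
#include <stdint.h>

#define TAG_BITS 8
#define TAG_MASK ((1u << TAG_BITS) - 1)

int main(void)
{
    static uint32_t hashTable[1u << 10];                     /* small stand-in hash table */
    size_t   const hashAndTag = (0x2A5u << TAG_BITS) | 0x7F; /* pretend hash = 0x2A5, tag = 0x7F */
    uint32_t const index      = 123456;                      /* pretend match index (fits in 24 bits) */

    /* write side: mirrors ZSTD_writeTaggedIndex() */
    {   size_t   const hash = hashAndTag >> TAG_BITS;
        uint32_t const tag  = (uint32_t)(hashAndTag & TAG_MASK);
        assert(index >> (32 - TAG_BITS) == 0);
        hashTable[hash] = (index << TAG_BITS) | tag;
    }

    /* read side: only dereference the candidate when the 8-bit tags agree (cheap reject) */
    {   uint32_t const packed = hashTable[hashAndTag >> TAG_BITS];
        if ((packed & TAG_MASK) == (hashAndTag & TAG_MASK)) {
            uint32_t const matchIndex = packed >> TAG_BITS;
            assert(matchIndex == index);
        }
    }
    return 0;
}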
/* ===============================================================
* Shared internal declarations
@@ -1319,6 +1506,25 @@ size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs);
+typedef struct {
+ U32 idx; /* Index in array of ZSTD_Sequence */
+ U32 posInSequence; /* Position within sequence at idx */
+ size_t posInSrc; /* Number of bytes given by sequences provided so far */
+} ZSTD_SequencePosition;
+
+/* for benchmark */
+size_t ZSTD_convertBlockSequences(ZSTD_CCtx* cctx,
+ const ZSTD_Sequence* const inSeqs, size_t nbSequences,
+ int const repcodeResolution);
+
+typedef struct {
+ size_t nbSequences;
+ size_t blockSize;
+ size_t litSize;
+} BlockSummary;
+
+BlockSummary ZSTD_get1BlockSummary(const ZSTD_Sequence* seqs, size_t nbSeqs);
+
/* ==============================================================
* Private declarations
* These prototypes shall only be called from within lib/compress
@@ -1330,7 +1536,7 @@ void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs);
* Note: srcSizeHint == 0 means 0!
*/
ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
- const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
+ const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode);
/*! ZSTD_initCStream_internal() :
* Private use only. Init streaming operation.
@@ -1342,7 +1548,7 @@ size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
const ZSTD_CDict* cdict,
const ZSTD_CCtx_params* params, unsigned long long pledgedSrcSize);
-void ZSTD_resetSeqStore(seqStore_t* ssPtr);
+void ZSTD_resetSeqStore(SeqStore_t* ssPtr);
/*! ZSTD_getCParamsFromCDict() :
* as the name implies */
@@ -1381,11 +1587,10 @@ size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity);
* This cannot be used when long range matching is enabled.
* Zstd will use these sequences, and pass the literals to a secondary block
* compressor.
- * @return : An error code on failure.
* NOTE: seqs are not verified! Invalid sequences can cause out-of-bounds memory
* access and data corruption.
*/
-size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq);
+void ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq);
/* ZSTD_cycleLog() :
* condition for correct operation : hashLog > 1 */
@@ -1396,4 +1601,28 @@ U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat);
*/
void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize);
+/* Returns 1 if an external sequence producer is registered, otherwise returns 0. */
+MEM_STATIC int ZSTD_hasExtSeqProd(const ZSTD_CCtx_params* params) {
+ return params->extSeqProdFunc != NULL;
+}
+
+/* ===============================================================
+ * Deprecated definitions that are still used internally to avoid
+ * deprecation warnings. These functions are exactly equivalent to
+ * their public variants, but avoid the deprecation warnings.
+ * =============================================================== */
+
+size_t ZSTD_compressBegin_usingCDict_deprecated(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict);
+
+size_t ZSTD_compressContinue_public(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize);
+
+size_t ZSTD_compressEnd_public(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize);
+
+size_t ZSTD_compressBlock_deprecated(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+
+
#endif /* ZSTD_COMPRESS_H */
diff --git a/lib/zstd/compress/zstd_compress_literals.c b/lib/zstd/compress/zstd_compress_literals.c
index 52b0a8059aba..ec39b4299b6f 100644
--- a/lib/zstd/compress/zstd_compress_literals.c
+++ b/lib/zstd/compress/zstd_compress_literals.c
@@ -1,5 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -13,11 +14,36 @@
***************************************/
#include "zstd_compress_literals.h"
+
+/* **************************************************************
+* Debug Traces
+****************************************************************/
+#if DEBUGLEVEL >= 2
+
+static size_t showHexa(const void* src, size_t srcSize)
+{
+ const BYTE* const ip = (const BYTE*)src;
+ size_t u;
+ for (u=0; u<srcSize; u++) {
+ RAWLOG(5, " %02X", ip[u]); (void)ip;
+ }
+ RAWLOG(5, " \n");
+ return srcSize;
+}
+
+#endif
+
+
+/* **************************************************************
+* Literals compression - special cases
+****************************************************************/
size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
BYTE* const ostart = (BYTE*)dst;
U32 const flSize = 1 + (srcSize>31) + (srcSize>4095);
+ DEBUGLOG(5, "ZSTD_noCompressLiterals: srcSize=%zu, dstCapacity=%zu", srcSize, dstCapacity);
+
RETURN_ERROR_IF(srcSize + flSize > dstCapacity, dstSize_tooSmall, "");
switch(flSize)
@@ -36,16 +62,30 @@ size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src,
}
ZSTD_memcpy(ostart + flSize, src, srcSize);
- DEBUGLOG(5, "Raw literals: %u -> %u", (U32)srcSize, (U32)(srcSize + flSize));
+ DEBUGLOG(5, "Raw (uncompressed) literals: %u -> %u", (U32)srcSize, (U32)(srcSize + flSize));
return srcSize + flSize;
}
+static int allBytesIdentical(const void* src, size_t srcSize)
+{
+ assert(srcSize >= 1);
+ assert(src != NULL);
+ { const BYTE b = ((const BYTE*)src)[0];
+ size_t p;
+ for (p=1; p<srcSize; p++) {
+ if (((const BYTE*)src)[p] != b) return 0;
+ }
+ return 1;
+ }
+}
+
size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
BYTE* const ostart = (BYTE*)dst;
U32 const flSize = 1 + (srcSize>31) + (srcSize>4095);
- (void)dstCapacity; /* dstCapacity already guaranteed to be >=4, hence large enough */
+ assert(dstCapacity >= 4); (void)dstCapacity;
+ assert(allBytesIdentical(src, srcSize));
switch(flSize)
{
@@ -63,28 +103,51 @@ size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void*
}
ostart[flSize] = *(const BYTE*)src;
- DEBUGLOG(5, "RLE literals: %u -> %u", (U32)srcSize, (U32)flSize + 1);
+ DEBUGLOG(5, "RLE : Repeated Literal (%02X: %u times) -> %u bytes encoded", ((const BYTE*)src)[0], (U32)srcSize, (U32)flSize + 1);
return flSize+1;
}
-size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
- ZSTD_hufCTables_t* nextHuf,
- ZSTD_strategy strategy, int disableLiteralCompression,
- void* dst, size_t dstCapacity,
- const void* src, size_t srcSize,
- void* entropyWorkspace, size_t entropyWorkspaceSize,
- const int bmi2,
- unsigned suspectUncompressible)
+/* ZSTD_minLiteralsToCompress() :
+ * returns the minimum number of literals
+ * required for literal compression to even be attempted.
+ * The threshold shrinks as the compression strategy increases.
+ */
+static size_t
+ZSTD_minLiteralsToCompress(ZSTD_strategy strategy, HUF_repeat huf_repeat)
+{
+ assert((int)strategy >= 0);
+ assert((int)strategy <= 9);
+ /* btultra2 : min 8 bytes;
+ * then 2x larger for each successive compression strategy
+ * max threshold 64 bytes */
+ { int const shift = MIN(9-(int)strategy, 3);
+ size_t const mintc = (huf_repeat == HUF_repeat_valid) ? 6 : (size_t)8 << shift;
+ DEBUGLOG(7, "minLiteralsToCompress = %zu", mintc);
+ return mintc;
+ }
+}
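Plugging in concrete strategy values (assuming the ZSTD_strategy enum ordering, ZSTD_fast == 1 .. ZSTD_btultra2 == 9), a standalone mirror of the formula, ignoring the HUF_repeat_valid shortcut that lowers the threshold to 6:

#include <assert.h>
#include <stddef.h>

#define MIN_(a, b) ((a) < (b) ? (a) : (b))

/* mirrors the non-repeat branch of ZSTD_minLiteralsToCompress() */
static size_t minLits(int strategy)
{
    int const shift = MIN_(9 - strategy, 3);
    return (size_t)8 << shift;
}

int main(void)
{
    assert(minLits(9) == 8);     /* btultra2 : most willing to try */
    assert(minLits(7) == 32);    /* btopt */
    assert(minLits(1) == 64);    /* fast : shift saturates at 3 */
    return 0;
}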
+
+size_t ZSTD_compressLiterals (
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ void* entropyWorkspace, size_t entropyWorkspaceSize,
+ const ZSTD_hufCTables_t* prevHuf,
+ ZSTD_hufCTables_t* nextHuf,
+ ZSTD_strategy strategy,
+ int disableLiteralCompression,
+ int suspectUncompressible,
+ int bmi2)
{
- size_t const minGain = ZSTD_minGain(srcSize, strategy);
size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB);
BYTE* const ostart = (BYTE*)dst;
U32 singleStream = srcSize < 256;
- symbolEncodingType_e hType = set_compressed;
+ SymbolEncodingType_e hType = set_compressed;
size_t cLitSize;
- DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i srcSize=%u)",
- disableLiteralCompression, (U32)srcSize);
+ DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i, srcSize=%u, dstCapacity=%zu)",
+ disableLiteralCompression, (U32)srcSize, dstCapacity);
+
+ DEBUGLOG(6, "Completed literals listing (%zu bytes)", showHexa(src, srcSize));
/* Prepare nextEntropy assuming reusing the existing table */
ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
@@ -92,40 +155,51 @@ size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
if (disableLiteralCompression)
return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
- /* small ? don't even attempt compression (speed opt) */
-# define COMPRESS_LITERALS_SIZE_MIN 63
- { size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
- if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
- }
+ /* if too small, don't even attempt compression (speed opt) */
+ if (srcSize < ZSTD_minLiteralsToCompress(strategy, prevHuf->repeatMode))
+ return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
RETURN_ERROR_IF(dstCapacity < lhSize+1, dstSize_tooSmall, "not enough space for compression");
{ HUF_repeat repeat = prevHuf->repeatMode;
- int const preferRepeat = strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
+ int const flags = 0
+ | (bmi2 ? HUF_flags_bmi2 : 0)
+ | (strategy < ZSTD_lazy && srcSize <= 1024 ? HUF_flags_preferRepeat : 0)
+ | (strategy >= HUF_OPTIMAL_DEPTH_THRESHOLD ? HUF_flags_optimalDepth : 0)
+ | (suspectUncompressible ? HUF_flags_suspectUncompressible : 0);
+
+ typedef size_t (*huf_compress_f)(void*, size_t, const void*, size_t, unsigned, unsigned, void*, size_t, HUF_CElt*, HUF_repeat*, int);
+ huf_compress_f huf_compress;
if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1;
- cLitSize = singleStream ?
- HUF_compress1X_repeat(
- ostart+lhSize, dstCapacity-lhSize, src, srcSize,
- HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize,
- (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2, suspectUncompressible) :
- HUF_compress4X_repeat(
- ostart+lhSize, dstCapacity-lhSize, src, srcSize,
- HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize,
- (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2, suspectUncompressible);
+ huf_compress = singleStream ? HUF_compress1X_repeat : HUF_compress4X_repeat;
+ cLitSize = huf_compress(ostart+lhSize, dstCapacity-lhSize,
+ src, srcSize,
+ HUF_SYMBOLVALUE_MAX, LitHufLog,
+ entropyWorkspace, entropyWorkspaceSize,
+ (HUF_CElt*)nextHuf->CTable,
+ &repeat, flags);
+ DEBUGLOG(5, "%zu literals compressed into %zu bytes (before header)", srcSize, cLitSize);
if (repeat != HUF_repeat_none) {
/* reused the existing table */
- DEBUGLOG(5, "Reusing previous huffman table");
+ DEBUGLOG(5, "reusing statistics from previous huffman block");
hType = set_repeat;
}
}
- if ((cLitSize==0) || (cLitSize >= srcSize - minGain) || ERR_isError(cLitSize)) {
- ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
- return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
- }
+ { size_t const minGain = ZSTD_minGain(srcSize, strategy);
+ if ((cLitSize==0) || (cLitSize >= srcSize - minGain) || ERR_isError(cLitSize)) {
+ ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
+ return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
+ } }
if (cLitSize==1) {
- ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
- return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
- }
+ /* A return value of 1 signals that the alphabet consists of a single symbol.
+ * However, in some rare circumstances, it could be the compressed size (a single byte).
+ * For that outcome to have a chance to happen, it's necessary that `srcSize < 8`.
+ * (it's also necessary to not generate statistics).
+ * Therefore, in such a case, actively check that all bytes are identical. */
+ if ((srcSize >= 8) || allBytesIdentical(src, srcSize)) {
+ ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
+ return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
+ } }
if (hType == set_compressed) {
/* using a newly constructed table */
@@ -136,16 +210,19 @@ size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
switch(lhSize)
{
case 3: /* 2 - 2 - 10 - 10 */
- { U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14);
+ if (!singleStream) assert(srcSize >= MIN_LITERALS_FOR_4_STREAMS);
+ { U32 const lhc = hType + ((U32)(!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14);
MEM_writeLE24(ostart, lhc);
break;
}
case 4: /* 2 - 2 - 14 - 14 */
+ assert(srcSize >= MIN_LITERALS_FOR_4_STREAMS);
{ U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18);
MEM_writeLE32(ostart, lhc);
break;
}
case 5: /* 2 - 2 - 18 - 18 */
+ assert(srcSize >= MIN_LITERALS_FOR_4_STREAMS);
{ U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22);
MEM_writeLE32(ostart, lhc);
ostart[4] = (BYTE)(cLitSize >> 10);
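To make the smallest literals-section header concrete (the '2 - 2 - 10 - 10' case above: 2 bits block type, 2 bits size format, 10 bits regenerated size, 10 bits compressed size), a standalone sketch with invented sizes that mirrors the lhc packing and then unpacks it to confirm the field boundaries:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint32_t const hType        = 2;     /* set_compressed */
    uint32_t const singleStream = 0;     /* 4 streams */
    uint32_t const srcSize      = 500;   /* regenerated size, must fit in 10 bits here */
    uint32_t const cLitSize     = 300;   /* compressed size, must fit in 10 bits here */

    uint32_t const lhc = hType + ((uint32_t)(!singleStream) << 2) + (srcSize << 4) + (cLitSize << 14);

    assert((lhc & 3) == hType);               /* bits  0-1  : literals block type */
    assert(((lhc >> 4) & 0x3FF) == srcSize);  /* bits  4-13 : regenerated size */
    assert((lhc >> 14) == cLitSize);          /* bits 14-23 : compressed size */
    return 0;
}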
diff --git a/lib/zstd/compress/zstd_compress_literals.h b/lib/zstd/compress/zstd_compress_literals.h
index 9775fb97cb70..a2a85d6b69e5 100644
--- a/lib/zstd/compress/zstd_compress_literals.h
+++ b/lib/zstd/compress/zstd_compress_literals.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -16,16 +17,24 @@
size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+/* ZSTD_compressRleLiteralsBlock() :
+ * Conditions :
+ * - All bytes in @src are identical
+ * - dstCapacity >= 4 */
size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize);
-/* If suspectUncompressible then some sampling checks will be run to potentially skip huffman coding */
-size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
- ZSTD_hufCTables_t* nextHuf,
- ZSTD_strategy strategy, int disableLiteralCompression,
- void* dst, size_t dstCapacity,
+/* ZSTD_compressLiterals():
+ * @entropyWorkspace: must be aligned on 4-byte boundaries
+ * @entropyWorkspaceSize : must be >= HUF_WORKSPACE_SIZE
+ * @suspectUncompressible: sampling checks, to potentially skip huffman coding
+ */
+size_t ZSTD_compressLiterals (void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
void* entropyWorkspace, size_t entropyWorkspaceSize,
- const int bmi2,
- unsigned suspectUncompressible);
+ const ZSTD_hufCTables_t* prevHuf,
+ ZSTD_hufCTables_t* nextHuf,
+ ZSTD_strategy strategy, int disableLiteralCompression,
+ int suspectUncompressible,
+ int bmi2);
#endif /* ZSTD_COMPRESS_LITERALS_H */
diff --git a/lib/zstd/compress/zstd_compress_sequences.c b/lib/zstd/compress/zstd_compress_sequences.c
index 21ddc1b37acf..256980c9d85a 100644
--- a/lib/zstd/compress/zstd_compress_sequences.c
+++ b/lib/zstd/compress/zstd_compress_sequences.c
@@ -1,5 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -58,7 +59,7 @@ static unsigned ZSTD_useLowProbCount(size_t const nbSeq)
{
/* Heuristic: This should cover most blocks <= 16K and
* start to fade out after 16K to about 32K depending on
- * comprssibility.
+ * compressibility.
*/
return nbSeq >= 2048;
}
@@ -153,20 +154,20 @@ size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog,
return cost >> 8;
}
-symbolEncodingType_e
+SymbolEncodingType_e
ZSTD_selectEncodingType(
FSE_repeat* repeatMode, unsigned const* count, unsigned const max,
size_t const mostFrequent, size_t nbSeq, unsigned const FSELog,
FSE_CTable const* prevCTable,
short const* defaultNorm, U32 defaultNormLog,
- ZSTD_defaultPolicy_e const isDefaultAllowed,
+ ZSTD_DefaultPolicy_e const isDefaultAllowed,
ZSTD_strategy const strategy)
{
ZSTD_STATIC_ASSERT(ZSTD_defaultDisallowed == 0 && ZSTD_defaultAllowed != 0);
if (mostFrequent == nbSeq) {
*repeatMode = FSE_repeat_none;
if (isDefaultAllowed && nbSeq <= 2) {
- /* Prefer set_basic over set_rle when there are 2 or less symbols,
+ /* Prefer set_basic over set_rle when there are 2 or fewer symbols,
* since RLE uses 1 byte, but set_basic uses 5-6 bits per symbol.
* If basic encoding isn't possible, always choose RLE.
*/
@@ -241,7 +242,7 @@ typedef struct {
size_t
ZSTD_buildCTable(void* dst, size_t dstCapacity,
- FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
+ FSE_CTable* nextCTable, U32 FSELog, SymbolEncodingType_e type,
unsigned* count, U32 max,
const BYTE* codeTable, size_t nbSeq,
const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
@@ -293,7 +294,7 @@ ZSTD_encodeSequences_body(
FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
- seqDef const* sequences, size_t nbSeq, int longOffsets)
+ SeqDef const* sequences, size_t nbSeq, int longOffsets)
{
BIT_CStream_t blockStream;
FSE_CState_t stateMatchLength;
@@ -387,7 +388,7 @@ ZSTD_encodeSequences_default(
FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
- seqDef const* sequences, size_t nbSeq, int longOffsets)
+ SeqDef const* sequences, size_t nbSeq, int longOffsets)
{
return ZSTD_encodeSequences_body(dst, dstCapacity,
CTable_MatchLength, mlCodeTable,
@@ -405,7 +406,7 @@ ZSTD_encodeSequences_bmi2(
FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
- seqDef const* sequences, size_t nbSeq, int longOffsets)
+ SeqDef const* sequences, size_t nbSeq, int longOffsets)
{
return ZSTD_encodeSequences_body(dst, dstCapacity,
CTable_MatchLength, mlCodeTable,
@@ -421,7 +422,7 @@ size_t ZSTD_encodeSequences(
FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
- seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2)
+ SeqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2)
{
DEBUGLOG(5, "ZSTD_encodeSequences: dstCapacity = %u", (unsigned)dstCapacity);
#if DYNAMIC_BMI2
diff --git a/lib/zstd/compress/zstd_compress_sequences.h b/lib/zstd/compress/zstd_compress_sequences.h
index 7991364c2f71..14fdccb6547f 100644
--- a/lib/zstd/compress/zstd_compress_sequences.h
+++ b/lib/zstd/compress/zstd_compress_sequences.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -11,26 +12,27 @@
#ifndef ZSTD_COMPRESS_SEQUENCES_H
#define ZSTD_COMPRESS_SEQUENCES_H
+#include "zstd_compress_internal.h" /* SeqDef */
#include "../common/fse.h" /* FSE_repeat, FSE_CTable */
-#include "../common/zstd_internal.h" /* symbolEncodingType_e, ZSTD_strategy */
+#include "../common/zstd_internal.h" /* SymbolEncodingType_e, ZSTD_strategy */
typedef enum {
ZSTD_defaultDisallowed = 0,
ZSTD_defaultAllowed = 1
-} ZSTD_defaultPolicy_e;
+} ZSTD_DefaultPolicy_e;
-symbolEncodingType_e
+SymbolEncodingType_e
ZSTD_selectEncodingType(
FSE_repeat* repeatMode, unsigned const* count, unsigned const max,
size_t const mostFrequent, size_t nbSeq, unsigned const FSELog,
FSE_CTable const* prevCTable,
short const* defaultNorm, U32 defaultNormLog,
- ZSTD_defaultPolicy_e const isDefaultAllowed,
+ ZSTD_DefaultPolicy_e const isDefaultAllowed,
ZSTD_strategy const strategy);
size_t
ZSTD_buildCTable(void* dst, size_t dstCapacity,
- FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
+ FSE_CTable* nextCTable, U32 FSELog, SymbolEncodingType_e type,
unsigned* count, U32 max,
const BYTE* codeTable, size_t nbSeq,
const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
@@ -42,7 +44,7 @@ size_t ZSTD_encodeSequences(
FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
- seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2);
+ SeqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2);
size_t ZSTD_fseBitCost(
FSE_CTable const* ctable,
diff --git a/lib/zstd/compress/zstd_compress_superblock.c b/lib/zstd/compress/zstd_compress_superblock.c
index 17d836cc84e8..dc12d64e935c 100644
--- a/lib/zstd/compress/zstd_compress_superblock.c
+++ b/lib/zstd/compress/zstd_compress_superblock.c
@@ -1,5 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -36,13 +37,14 @@
* If it is set_compressed, first sub-block's literals section will be Treeless_Literals_Block
* and the following sub-blocks' literals sections will be Treeless_Literals_Block.
* @return : compressed size of literals section of a sub-block
- * Or 0 if it unable to compress.
+ * Or 0 if unable to compress.
* Or error code */
-static size_t ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable,
- const ZSTD_hufCTablesMetadata_t* hufMetadata,
- const BYTE* literals, size_t litSize,
- void* dst, size_t dstSize,
- const int bmi2, int writeEntropy, int* entropyWritten)
+static size_t
+ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable,
+ const ZSTD_hufCTablesMetadata_t* hufMetadata,
+ const BYTE* literals, size_t litSize,
+ void* dst, size_t dstSize,
+ const int bmi2, int writeEntropy, int* entropyWritten)
{
size_t const header = writeEntropy ? 200 : 0;
size_t const lhSize = 3 + (litSize >= (1 KB - header)) + (litSize >= (16 KB - header));
@@ -50,11 +52,9 @@ static size_t ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable,
BYTE* const oend = ostart + dstSize;
BYTE* op = ostart + lhSize;
U32 const singleStream = lhSize == 3;
- symbolEncodingType_e hType = writeEntropy ? hufMetadata->hType : set_repeat;
+ SymbolEncodingType_e hType = writeEntropy ? hufMetadata->hType : set_repeat;
size_t cLitSize = 0;
- (void)bmi2; /* TODO bmi2... */
-
DEBUGLOG(5, "ZSTD_compressSubBlock_literal (litSize=%zu, lhSize=%zu, writeEntropy=%d)", litSize, lhSize, writeEntropy);
*entropyWritten = 0;
@@ -76,9 +76,9 @@ static size_t ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable,
DEBUGLOG(5, "ZSTD_compressSubBlock_literal (hSize=%zu)", hufMetadata->hufDesSize);
}
- /* TODO bmi2 */
- { const size_t cSize = singleStream ? HUF_compress1X_usingCTable(op, oend-op, literals, litSize, hufTable)
- : HUF_compress4X_usingCTable(op, oend-op, literals, litSize, hufTable);
+ { int const flags = bmi2 ? HUF_flags_bmi2 : 0;
+ const size_t cSize = singleStream ? HUF_compress1X_usingCTable(op, (size_t)(oend-op), literals, litSize, hufTable, flags)
+ : HUF_compress4X_usingCTable(op, (size_t)(oend-op), literals, litSize, hufTable, flags);
op += cSize;
cLitSize += cSize;
if (cSize == 0 || ERR_isError(cSize)) {
@@ -103,7 +103,7 @@ static size_t ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable,
switch(lhSize)
{
case 3: /* 2 - 2 - 10 - 10 */
- { U32 const lhc = hType + ((!singleStream) << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<14);
+ { U32 const lhc = hType + ((U32)(!singleStream) << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<14);
MEM_writeLE24(ostart, lhc);
break;
}
@@ -123,26 +123,30 @@ static size_t ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable,
}
*entropyWritten = 1;
DEBUGLOG(5, "Compressed literals: %u -> %u", (U32)litSize, (U32)(op-ostart));
- return op-ostart;
+ return (size_t)(op-ostart);
}
-static size_t ZSTD_seqDecompressedSize(seqStore_t const* seqStore, const seqDef* sequences, size_t nbSeq, size_t litSize, int lastSequence) {
- const seqDef* const sstart = sequences;
- const seqDef* const send = sequences + nbSeq;
- const seqDef* sp = sstart;
+static size_t
+ZSTD_seqDecompressedSize(SeqStore_t const* seqStore,
+ const SeqDef* sequences, size_t nbSeqs,
+ size_t litSize, int lastSubBlock)
+{
size_t matchLengthSum = 0;
size_t litLengthSum = 0;
- (void)(litLengthSum); /* suppress unused variable warning on some environments */
- while (send-sp > 0) {
- ZSTD_sequenceLength const seqLen = ZSTD_getSequenceLength(seqStore, sp);
+ size_t n;
+ for (n=0; n<nbSeqs; n++) {
+ const ZSTD_SequenceLength seqLen = ZSTD_getSequenceLength(seqStore, sequences+n);
litLengthSum += seqLen.litLength;
matchLengthSum += seqLen.matchLength;
- sp++;
}
- assert(litLengthSum <= litSize);
- if (!lastSequence) {
+ DEBUGLOG(5, "ZSTD_seqDecompressedSize: %u sequences from %p: %u literals + %u matchlength",
+ (unsigned)nbSeqs, (const void*)sequences,
+ (unsigned)litLengthSum, (unsigned)matchLengthSum);
+ if (!lastSubBlock)
assert(litLengthSum == litSize);
- }
+ else
+ assert(litLengthSum <= litSize);
+ (void)litLengthSum;
return matchLengthSum + litSize;
}
@@ -156,13 +160,14 @@ static size_t ZSTD_seqDecompressedSize(seqStore_t const* seqStore, const seqDef*
* @return : compressed size of sequences section of a sub-block
* Or 0 if it is unable to compress
* Or error code. */
-static size_t ZSTD_compressSubBlock_sequences(const ZSTD_fseCTables_t* fseTables,
- const ZSTD_fseCTablesMetadata_t* fseMetadata,
- const seqDef* sequences, size_t nbSeq,
- const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode,
- const ZSTD_CCtx_params* cctxParams,
- void* dst, size_t dstCapacity,
- const int bmi2, int writeEntropy, int* entropyWritten)
+static size_t
+ZSTD_compressSubBlock_sequences(const ZSTD_fseCTables_t* fseTables,
+ const ZSTD_fseCTablesMetadata_t* fseMetadata,
+ const SeqDef* sequences, size_t nbSeq,
+ const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode,
+ const ZSTD_CCtx_params* cctxParams,
+ void* dst, size_t dstCapacity,
+ const int bmi2, int writeEntropy, int* entropyWritten)
{
const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
BYTE* const ostart = (BYTE*)dst;
@@ -176,14 +181,14 @@ static size_t ZSTD_compressSubBlock_sequences(const ZSTD_fseCTables_t* fseTables
/* Sequences Header */
RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,
dstSize_tooSmall, "");
- if (nbSeq < 0x7F)
+ if (nbSeq < 128)
*op++ = (BYTE)nbSeq;
else if (nbSeq < LONGNBSEQ)
op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2;
else
op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3;
if (nbSeq==0) {
- return op - ostart;
+ return (size_t)(op - ostart);
}
/* seqHead : flags for FSE encoding type */
@@ -205,7 +210,7 @@ static size_t ZSTD_compressSubBlock_sequences(const ZSTD_fseCTables_t* fseTables
}
{ size_t const bitstreamSize = ZSTD_encodeSequences(
- op, oend - op,
+ op, (size_t)(oend - op),
fseTables->matchlengthCTable, mlCode,
fseTables->offcodeCTable, ofCode,
fseTables->litlengthCTable, llCode,
@@ -249,7 +254,7 @@ static size_t ZSTD_compressSubBlock_sequences(const ZSTD_fseCTables_t* fseTables
#endif
*entropyWritten = 1;
- return op - ostart;
+ return (size_t)(op - ostart);
}
/* ZSTD_compressSubBlock() :
@@ -258,7 +263,7 @@ static size_t ZSTD_compressSubBlock_sequences(const ZSTD_fseCTables_t* fseTables
* Or 0 if it failed to compress. */
static size_t ZSTD_compressSubBlock(const ZSTD_entropyCTables_t* entropy,
const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
- const seqDef* sequences, size_t nbSeq,
+ const SeqDef* sequences, size_t nbSeq,
const BYTE* literals, size_t litSize,
const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode,
const ZSTD_CCtx_params* cctxParams,
@@ -275,7 +280,8 @@ static size_t ZSTD_compressSubBlock(const ZSTD_entropyCTables_t* entropy,
litSize, nbSeq, writeLitEntropy, writeSeqEntropy, lastBlock);
{ size_t cLitSize = ZSTD_compressSubBlock_literal((const HUF_CElt*)entropy->huf.CTable,
&entropyMetadata->hufMetadata, literals, litSize,
- op, oend-op, bmi2, writeLitEntropy, litEntropyWritten);
+ op, (size_t)(oend-op),
+ bmi2, writeLitEntropy, litEntropyWritten);
FORWARD_IF_ERROR(cLitSize, "ZSTD_compressSubBlock_literal failed");
if (cLitSize == 0) return 0;
op += cLitSize;
@@ -285,18 +291,18 @@ static size_t ZSTD_compressSubBlock(const ZSTD_entropyCTables_t* entropy,
sequences, nbSeq,
llCode, mlCode, ofCode,
cctxParams,
- op, oend-op,
+ op, (size_t)(oend-op),
bmi2, writeSeqEntropy, seqEntropyWritten);
FORWARD_IF_ERROR(cSeqSize, "ZSTD_compressSubBlock_sequences failed");
if (cSeqSize == 0) return 0;
op += cSeqSize;
}
/* Write block header */
- { size_t cSize = (op-ostart)-ZSTD_blockHeaderSize;
+ { size_t cSize = (size_t)(op-ostart) - ZSTD_blockHeaderSize;
U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
MEM_writeLE24(ostart, cBlockHeader24);
}
- return op-ostart;
+ return (size_t)(op-ostart);
}
static size_t ZSTD_estimateSubBlockSize_literal(const BYTE* literals, size_t litSize,
@@ -322,7 +328,7 @@ static size_t ZSTD_estimateSubBlockSize_literal(const BYTE* literals, size_t lit
return 0;
}
-static size_t ZSTD_estimateSubBlockSize_symbolType(symbolEncodingType_e type,
+static size_t ZSTD_estimateSubBlockSize_symbolType(SymbolEncodingType_e type,
const BYTE* codeTable, unsigned maxCode,
size_t nbSeq, const FSE_CTable* fseCTable,
const U8* additionalBits,
@@ -385,7 +391,11 @@ static size_t ZSTD_estimateSubBlockSize_sequences(const BYTE* ofCodeTable,
return cSeqSizeEstimate + sequencesSectionHeaderSize;
}
-static size_t ZSTD_estimateSubBlockSize(const BYTE* literals, size_t litSize,
+typedef struct {
+ size_t estLitSize;
+ size_t estBlockSize;
+} EstimatedBlockSize;
+static EstimatedBlockSize ZSTD_estimateSubBlockSize(const BYTE* literals, size_t litSize,
const BYTE* ofCodeTable,
const BYTE* llCodeTable,
const BYTE* mlCodeTable,
@@ -393,15 +403,17 @@ static size_t ZSTD_estimateSubBlockSize(const BYTE* literals, size_t litSize,
const ZSTD_entropyCTables_t* entropy,
const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
void* workspace, size_t wkspSize,
- int writeLitEntropy, int writeSeqEntropy) {
- size_t cSizeEstimate = 0;
- cSizeEstimate += ZSTD_estimateSubBlockSize_literal(literals, litSize,
- &entropy->huf, &entropyMetadata->hufMetadata,
- workspace, wkspSize, writeLitEntropy);
- cSizeEstimate += ZSTD_estimateSubBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable,
+ int writeLitEntropy, int writeSeqEntropy)
+{
+ EstimatedBlockSize ebs;
+ ebs.estLitSize = ZSTD_estimateSubBlockSize_literal(literals, litSize,
+ &entropy->huf, &entropyMetadata->hufMetadata,
+ workspace, wkspSize, writeLitEntropy);
+ ebs.estBlockSize = ZSTD_estimateSubBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable,
nbSeq, &entropy->fse, &entropyMetadata->fseMetadata,
workspace, wkspSize, writeSeqEntropy);
- return cSizeEstimate + ZSTD_blockHeaderSize;
+ ebs.estBlockSize += ebs.estLitSize + ZSTD_blockHeaderSize;
+ return ebs;
}
static int ZSTD_needSequenceEntropyTables(ZSTD_fseCTablesMetadata_t const* fseMetadata)
@@ -415,14 +427,57 @@ static int ZSTD_needSequenceEntropyTables(ZSTD_fseCTablesMetadata_t const* fseMe
return 0;
}
+static size_t countLiterals(SeqStore_t const* seqStore, const SeqDef* sp, size_t seqCount)
+{
+ size_t n, total = 0;
+ assert(sp != NULL);
+ for (n=0; n<seqCount; n++) {
+ total += ZSTD_getSequenceLength(seqStore, sp+n).litLength;
+ }
+ DEBUGLOG(6, "countLiterals for %zu sequences from %p => %zu bytes", seqCount, (const void*)sp, total);
+ return total;
+}
+
+#define BYTESCALE 256
+
+static size_t sizeBlockSequences(const SeqDef* sp, size_t nbSeqs,
+ size_t targetBudget, size_t avgLitCost, size_t avgSeqCost,
+ int firstSubBlock)
+{
+ size_t n, budget = 0, inSize=0;
+ /* entropy headers */
+ size_t const headerSize = (size_t)firstSubBlock * 120 * BYTESCALE; /* generous estimate */
+ assert(firstSubBlock==0 || firstSubBlock==1);
+ budget += headerSize;
+
+    /* first sequence: every sub-block contains at least one sequence */
+ budget += sp[0].litLength * avgLitCost + avgSeqCost;
+ if (budget > targetBudget) return 1;
+ inSize = sp[0].litLength + (sp[0].mlBase+MINMATCH);
+
+ /* loop over sequences */
+ for (n=1; n<nbSeqs; n++) {
+ size_t currentCost = sp[n].litLength * avgLitCost + avgSeqCost;
+ budget += currentCost;
+ inSize += sp[n].litLength + (sp[n].mlBase+MINMATCH);
+ /* stop when sub-block budget is reached */
+ if ( (budget > targetBudget)
+ /* though continue to expand until the sub-block is deemed compressible */
+ && (budget < inSize * BYTESCALE) )
+ break;
+ }
+
+ return n;
+}
+
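A worked example of the 1/256th-of-a-byte fixed-point accounting used above; all numbers are invented stand-ins for what ZSTD_estimateSubBlockSize() would report, just to show the scale of avgLitCost, avgSeqCost and the per-sub-block budget:

#include <stdio.h>

#define BYTESCALE 256   /* costs tracked in 1/256th-of-a-byte units */

int main(void)
{
    /* invented estimates for a hypothetical super-block */
    size_t const estLitSize = 4000, estBlockSize = 6000;
    size_t const nbLiterals = 20000, nbSeqs = 1000, targetCBlockSize = 1500;

    size_t const avgLitCost     = (estLitSize * BYTESCALE) / nbLiterals;               /* 51  ~ 0.2 byte per literal  */
    size_t const avgSeqCost     = ((estBlockSize - estLitSize) * BYTESCALE) / nbSeqs;  /* 512 =  2 bytes per sequence */
    size_t const nbSubBlocks    = (estBlockSize + targetCBlockSize / 2) / targetCBlockSize; /* 4 (real code clamps to >= 1) */
    size_t const avgBlockBudget = (estBlockSize * BYTESCALE) / nbSubBlocks;            /* 384000 = 1500 bytes per sub-block */

    printf("litCost=%zu/256  seqCost=%zu/256  subBlocks=%zu  budget=%zu/256\n",
           avgLitCost, avgSeqCost, nbSubBlocks, avgBlockBudget);
    return 0;
}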
/* ZSTD_compressSubBlock_multi() :
* Breaks super-block into multiple sub-blocks and compresses them.
- * Entropy will be written to the first block.
- * The following blocks will use repeat mode to compress.
- * All sub-blocks are compressed blocks (no raw or rle blocks).
- * @return : compressed size of the super block (which is multiple ZSTD blocks)
- * Or 0 if it failed to compress. */
-static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr,
+ * Entropy will be written into the first block.
+ * The following blocks use repeat_mode to compress.
+ * Sub-blocks are all compressed, except the last one when beneficial.
+ * @return : compressed size of the super block (which features multiple ZSTD blocks)
+ * or 0 if it failed to compress. */
+static size_t ZSTD_compressSubBlock_multi(const SeqStore_t* seqStorePtr,
const ZSTD_compressedBlockState_t* prevCBlock,
ZSTD_compressedBlockState_t* nextCBlock,
const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
@@ -432,12 +487,14 @@ static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr,
const int bmi2, U32 lastBlock,
void* workspace, size_t wkspSize)
{
- const seqDef* const sstart = seqStorePtr->sequencesStart;
- const seqDef* const send = seqStorePtr->sequences;
- const seqDef* sp = sstart;
+ const SeqDef* const sstart = seqStorePtr->sequencesStart;
+ const SeqDef* const send = seqStorePtr->sequences;
+    const SeqDef* sp = sstart; /* tracks progress within seqStorePtr->sequences */
+ size_t const nbSeqs = (size_t)(send - sstart);
const BYTE* const lstart = seqStorePtr->litStart;
const BYTE* const lend = seqStorePtr->lit;
const BYTE* lp = lstart;
+ size_t const nbLiterals = (size_t)(lend - lstart);
BYTE const* ip = (BYTE const*)src;
BYTE const* const iend = ip + srcSize;
BYTE* const ostart = (BYTE*)dst;
@@ -446,112 +503,171 @@ static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr,
const BYTE* llCodePtr = seqStorePtr->llCode;
const BYTE* mlCodePtr = seqStorePtr->mlCode;
const BYTE* ofCodePtr = seqStorePtr->ofCode;
- size_t targetCBlockSize = cctxParams->targetCBlockSize;
- size_t litSize, seqCount;
- int writeLitEntropy = entropyMetadata->hufMetadata.hType == set_compressed;
+ size_t const minTarget = ZSTD_TARGETCBLOCKSIZE_MIN; /* enforce minimum size, to reduce undesirable side effects */
+ size_t const targetCBlockSize = MAX(minTarget, cctxParams->targetCBlockSize);
+ int writeLitEntropy = (entropyMetadata->hufMetadata.hType == set_compressed);
int writeSeqEntropy = 1;
- int lastSequence = 0;
-
- DEBUGLOG(5, "ZSTD_compressSubBlock_multi (litSize=%u, nbSeq=%u)",
- (unsigned)(lend-lp), (unsigned)(send-sstart));
-
- litSize = 0;
- seqCount = 0;
- do {
- size_t cBlockSizeEstimate = 0;
- if (sstart == send) {
- lastSequence = 1;
- } else {
- const seqDef* const sequence = sp + seqCount;
- lastSequence = sequence == send - 1;
- litSize += ZSTD_getSequenceLength(seqStorePtr, sequence).litLength;
- seqCount++;
- }
- if (lastSequence) {
- assert(lp <= lend);
- assert(litSize <= (size_t)(lend - lp));
- litSize = (size_t)(lend - lp);
+
+ DEBUGLOG(5, "ZSTD_compressSubBlock_multi (srcSize=%u, litSize=%u, nbSeq=%u)",
+ (unsigned)srcSize, (unsigned)(lend-lstart), (unsigned)(send-sstart));
+
+ /* let's start with a general estimation for the full block */
+ if (nbSeqs > 0) {
+ EstimatedBlockSize const ebs =
+ ZSTD_estimateSubBlockSize(lp, nbLiterals,
+ ofCodePtr, llCodePtr, mlCodePtr, nbSeqs,
+ &nextCBlock->entropy, entropyMetadata,
+ workspace, wkspSize,
+ writeLitEntropy, writeSeqEntropy);
+ /* quick estimation */
+ size_t const avgLitCost = nbLiterals ? (ebs.estLitSize * BYTESCALE) / nbLiterals : BYTESCALE;
+ size_t const avgSeqCost = ((ebs.estBlockSize - ebs.estLitSize) * BYTESCALE) / nbSeqs;
+ const size_t nbSubBlocks = MAX((ebs.estBlockSize + (targetCBlockSize/2)) / targetCBlockSize, 1);
+ size_t n, avgBlockBudget, blockBudgetSupp=0;
+ avgBlockBudget = (ebs.estBlockSize * BYTESCALE) / nbSubBlocks;
+ DEBUGLOG(5, "estimated fullblock size=%u bytes ; avgLitCost=%.2f ; avgSeqCost=%.2f ; targetCBlockSize=%u, nbSubBlocks=%u ; avgBlockBudget=%.0f bytes",
+ (unsigned)ebs.estBlockSize, (double)avgLitCost/BYTESCALE, (double)avgSeqCost/BYTESCALE,
+ (unsigned)targetCBlockSize, (unsigned)nbSubBlocks, (double)avgBlockBudget/BYTESCALE);
+ /* simplification: if the estimate states that the full superblock doesn't compress, just bail out immediately;
+ * this will result in the production of a single uncompressed block covering @srcSize.*/
+ if (ebs.estBlockSize > srcSize) return 0;
+
+ /* compress and write sub-blocks */
+ assert(nbSubBlocks>0);
+ for (n=0; n < nbSubBlocks-1; n++) {
+ /* determine nb of sequences for current sub-block + nbLiterals from next sequence */
+ size_t const seqCount = sizeBlockSequences(sp, (size_t)(send-sp),
+ avgBlockBudget + blockBudgetSupp, avgLitCost, avgSeqCost, n==0);
+ /* if the last sequence is reached: break, and handle it in the last sub-block (simplification) */
+ assert(seqCount <= (size_t)(send-sp));
+ if (sp + seqCount == send) break;
+ assert(seqCount > 0);
+ /* compress sub-block */
+ { int litEntropyWritten = 0;
+ int seqEntropyWritten = 0;
+ size_t litSize = countLiterals(seqStorePtr, sp, seqCount);
+ const size_t decompressedSize =
+ ZSTD_seqDecompressedSize(seqStorePtr, sp, seqCount, litSize, 0);
+ size_t const cSize = ZSTD_compressSubBlock(&nextCBlock->entropy, entropyMetadata,
+ sp, seqCount,
+ lp, litSize,
+ llCodePtr, mlCodePtr, ofCodePtr,
+ cctxParams,
+ op, (size_t)(oend-op),
+ bmi2, writeLitEntropy, writeSeqEntropy,
+ &litEntropyWritten, &seqEntropyWritten,
+ 0);
+ FORWARD_IF_ERROR(cSize, "ZSTD_compressSubBlock failed");
+
+ /* check compressibility, update state components */
+ if (cSize > 0 && cSize < decompressedSize) {
+ DEBUGLOG(5, "Committed sub-block compressing %u bytes => %u bytes",
+ (unsigned)decompressedSize, (unsigned)cSize);
+ assert(ip + decompressedSize <= iend);
+ ip += decompressedSize;
+ lp += litSize;
+ op += cSize;
+ llCodePtr += seqCount;
+ mlCodePtr += seqCount;
+ ofCodePtr += seqCount;
+ /* Entropy only needs to be written once */
+ if (litEntropyWritten) {
+ writeLitEntropy = 0;
+ }
+ if (seqEntropyWritten) {
+ writeSeqEntropy = 0;
+ }
+ sp += seqCount;
+ blockBudgetSupp = 0;
+ } }
+ /* otherwise: do not compress yet, coalesce the current sub-block with the following one */
}
- /* I think there is an optimization opportunity here.
- * Calling ZSTD_estimateSubBlockSize for every sequence can be wasteful
- * since it recalculates estimate from scratch.
- * For example, it would recount literal distribution and symbol codes every time.
- */
- cBlockSizeEstimate = ZSTD_estimateSubBlockSize(lp, litSize, ofCodePtr, llCodePtr, mlCodePtr, seqCount,
- &nextCBlock->entropy, entropyMetadata,
- workspace, wkspSize, writeLitEntropy, writeSeqEntropy);
- if (cBlockSizeEstimate > targetCBlockSize || lastSequence) {
- int litEntropyWritten = 0;
- int seqEntropyWritten = 0;
- const size_t decompressedSize = ZSTD_seqDecompressedSize(seqStorePtr, sp, seqCount, litSize, lastSequence);
- const size_t cSize = ZSTD_compressSubBlock(&nextCBlock->entropy, entropyMetadata,
- sp, seqCount,
- lp, litSize,
- llCodePtr, mlCodePtr, ofCodePtr,
- cctxParams,
- op, oend-op,
- bmi2, writeLitEntropy, writeSeqEntropy,
- &litEntropyWritten, &seqEntropyWritten,
- lastBlock && lastSequence);
- FORWARD_IF_ERROR(cSize, "ZSTD_compressSubBlock failed");
- if (cSize > 0 && cSize < decompressedSize) {
- DEBUGLOG(5, "Committed the sub-block");
- assert(ip + decompressedSize <= iend);
- ip += decompressedSize;
- sp += seqCount;
- lp += litSize;
- op += cSize;
- llCodePtr += seqCount;
- mlCodePtr += seqCount;
- ofCodePtr += seqCount;
- litSize = 0;
- seqCount = 0;
- /* Entropy only needs to be written once */
- if (litEntropyWritten) {
- writeLitEntropy = 0;
- }
- if (seqEntropyWritten) {
- writeSeqEntropy = 0;
- }
+ } /* if (nbSeqs > 0) */
+
+ /* write last block */
+ DEBUGLOG(5, "Generate last sub-block: %u sequences remaining", (unsigned)(send - sp));
+ { int litEntropyWritten = 0;
+ int seqEntropyWritten = 0;
+ size_t litSize = (size_t)(lend - lp);
+ size_t seqCount = (size_t)(send - sp);
+ const size_t decompressedSize =
+ ZSTD_seqDecompressedSize(seqStorePtr, sp, seqCount, litSize, 1);
+ size_t const cSize = ZSTD_compressSubBlock(&nextCBlock->entropy, entropyMetadata,
+ sp, seqCount,
+ lp, litSize,
+ llCodePtr, mlCodePtr, ofCodePtr,
+ cctxParams,
+ op, (size_t)(oend-op),
+ bmi2, writeLitEntropy, writeSeqEntropy,
+ &litEntropyWritten, &seqEntropyWritten,
+ lastBlock);
+ FORWARD_IF_ERROR(cSize, "ZSTD_compressSubBlock failed");
+
+ /* update pointers, the nb of literals borrowed from next sequence must be preserved */
+ if (cSize > 0 && cSize < decompressedSize) {
+ DEBUGLOG(5, "Last sub-block compressed %u bytes => %u bytes",
+ (unsigned)decompressedSize, (unsigned)cSize);
+ assert(ip + decompressedSize <= iend);
+ ip += decompressedSize;
+ lp += litSize;
+ op += cSize;
+ llCodePtr += seqCount;
+ mlCodePtr += seqCount;
+ ofCodePtr += seqCount;
+ /* Entropy only needs to be written once */
+ if (litEntropyWritten) {
+ writeLitEntropy = 0;
}
+ if (seqEntropyWritten) {
+ writeSeqEntropy = 0;
+ }
+ sp += seqCount;
}
- } while (!lastSequence);
+ }
+
+
if (writeLitEntropy) {
- DEBUGLOG(5, "ZSTD_compressSubBlock_multi has literal entropy tables unwritten");
+ DEBUGLOG(5, "Literal entropy tables were never written");
ZSTD_memcpy(&nextCBlock->entropy.huf, &prevCBlock->entropy.huf, sizeof(prevCBlock->entropy.huf));
}
if (writeSeqEntropy && ZSTD_needSequenceEntropyTables(&entropyMetadata->fseMetadata)) {
/* If we haven't written our entropy tables, then we've violated our contract and
* must emit an uncompressed block.
*/
- DEBUGLOG(5, "ZSTD_compressSubBlock_multi has sequence entropy tables unwritten");
+ DEBUGLOG(5, "Sequence entropy tables were never written => cancel, emit an uncompressed block");
return 0;
}
+
if (ip < iend) {
- size_t const cSize = ZSTD_noCompressBlock(op, oend - op, ip, iend - ip, lastBlock);
- DEBUGLOG(5, "ZSTD_compressSubBlock_multi last sub-block uncompressed, %zu bytes", (size_t)(iend - ip));
+ /* some data left: the last part of the block is sent uncompressed */
+ size_t const rSize = (size_t)(iend - ip);
+ size_t const cSize = ZSTD_noCompressBlock(op, (size_t)(oend - op), ip, rSize, lastBlock);
+ DEBUGLOG(5, "Generate last uncompressed sub-block of %u bytes", (unsigned)(rSize));
FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
assert(cSize != 0);
op += cSize;
/* We have to regenerate the repcodes because we've skipped some sequences */
if (sp < send) {
- seqDef const* seq;
- repcodes_t rep;
+ const SeqDef* seq;
+ Repcodes_t rep;
ZSTD_memcpy(&rep, prevCBlock->rep, sizeof(rep));
for (seq = sstart; seq < sp; ++seq) {
- ZSTD_updateRep(rep.rep, seq->offBase - 1, ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0);
+ ZSTD_updateRep(rep.rep, seq->offBase, ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0);
}
ZSTD_memcpy(nextCBlock->rep, &rep, sizeof(rep));
}
}
- DEBUGLOG(5, "ZSTD_compressSubBlock_multi compressed");
- return op-ostart;
+
+ DEBUGLOG(5, "ZSTD_compressSubBlock_multi compressed all subBlocks: total compressed size = %u",
+ (unsigned)(op-ostart));
+ return (size_t)(op-ostart);
}
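
The change from seq->offBase - 1 to seq->offBase in the repcode regeneration above, together with the STORE_OFFSET/STORE_REPCODE_1 to OFFSET_TO_OFFBASE/REPCODE1_TO_OFFBASE renames throughout this series, reflects the unified "offBase" convention of newer zstd, in which ZSTD_updateRep() consumes offBase values directly. The macros below are an illustrative reconstruction of that convention (values 1..ZSTD_REP_NUM denote repcodes, larger values denote offset + ZSTD_REP_NUM); the authoritative definitions live in zstd_compress_internal.h and are not part of this patch.

#include <assert.h>
#include <stdio.h>

#define ZSTD_REP_NUM 3  /* number of repeat offsets tracked by zstd */

/* Illustrative reconstruction of the offBase convention:
 * offBase 1..ZSTD_REP_NUM -> repcode 1..3
 * offBase > ZSTD_REP_NUM  -> real offset == offBase - ZSTD_REP_NUM */
#define OFFSET_TO_OFFBASE(o)   ((unsigned)(o) + ZSTD_REP_NUM)
#define REPCODE_TO_OFFBASE(r)  ((unsigned)(r))
#define OFFBASE_IS_OFFSET(ob)  ((ob) > ZSTD_REP_NUM)
#define OFFBASE_TO_OFFSET(ob)  ((ob) - ZSTD_REP_NUM)
#define OFFBASE_TO_REPCODE(ob) (ob)

int main(void)
{
    unsigned const ob1 = OFFSET_TO_OFFBASE(1000);   /* a real match offset */
    unsigned const ob2 = REPCODE_TO_OFFBASE(1);     /* "repeat offset 1"   */
    assert(OFFBASE_IS_OFFSET(ob1) && OFFBASE_TO_OFFSET(ob1) == 1000);
    assert(!OFFBASE_IS_OFFSET(ob2) && OFFBASE_TO_REPCODE(ob2) == 1);
    printf("offBase(offset=1000)=%u, offBase(rep1)=%u\n", ob1, ob2);
    return 0;
}
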
size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc,
void* dst, size_t dstCapacity,
- void const* src, size_t srcSize,
- unsigned lastBlock) {
+ const void* src, size_t srcSize,
+ unsigned lastBlock)
+{
ZSTD_entropyCTablesMetadata_t entropyMetadata;
FORWARD_IF_ERROR(ZSTD_buildBlockEntropyStats(&zc->seqStore,
@@ -559,7 +675,7 @@ size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc,
&zc->blockState.nextCBlock->entropy,
&zc->appliedParams,
&entropyMetadata,
- zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */), "");
+ zc->tmpWorkspace, zc->tmpWkspSize /* statically allocated in resetCCtx */), "");
return ZSTD_compressSubBlock_multi(&zc->seqStore,
zc->blockState.prevCBlock,
@@ -569,5 +685,5 @@ size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc,
dst, dstCapacity,
src, srcSize,
zc->bmi2, lastBlock,
- zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */);
+ zc->tmpWorkspace, zc->tmpWkspSize /* statically allocated in resetCCtx */);
}
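
The super-block path is only exercised when a target compressed block size is configured. As a usage sketch only, assuming the caller has access to the standard zstd advanced API (ZSTD_CCtx_setParameter with ZSTD_c_targetCBlockSize, as in upstream libzstd; in-kernel users go through the linux/zstd.h wrappers instead), enabling it might look like this:

#include <stdio.h>
#include <string.h>
#include <zstd.h>   /* upstream libzstd advanced API */

/* Compress `src` while asking the encoder to emit compressed blocks of
 * roughly `targetCBlockSize` bytes, which routes block compression through
 * the super-block path. Returns the compressed size, or 0 on error. */
static size_t compress_with_target_cblock(void* dst, size_t dstCap,
                                          const void* src, size_t srcSize,
                                          size_t targetCBlockSize)
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    size_t cSize = 0;
    if (cctx == NULL) return 0;
    if (!ZSTD_isError(ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 3))
     && !ZSTD_isError(ZSTD_CCtx_setParameter(cctx, ZSTD_c_targetCBlockSize,
                                             (int)targetCBlockSize))) {
        size_t const r = ZSTD_compress2(cctx, dst, dstCap, src, srcSize);
        if (!ZSTD_isError(r)) cSize = r;
    }
    ZSTD_freeCCtx(cctx);
    return cSize;
}

int main(void)
{
    char src[4096]; char dst[8192];
    memset(src, 'a', sizeof(src));
    printf("compressed to %zu bytes\n",
           compress_with_target_cblock(dst, sizeof(dst), src, sizeof(src), 1300));
    return 0;
}
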
diff --git a/lib/zstd/compress/zstd_compress_superblock.h b/lib/zstd/compress/zstd_compress_superblock.h
index 224ece79546e..826bbc9e029b 100644
--- a/lib/zstd/compress/zstd_compress_superblock.h
+++ b/lib/zstd/compress/zstd_compress_superblock.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
diff --git a/lib/zstd/compress/zstd_cwksp.h b/lib/zstd/compress/zstd_cwksp.h
index 349fc923c355..dce42f653bae 100644
--- a/lib/zstd/compress/zstd_cwksp.h
+++ b/lib/zstd/compress/zstd_cwksp.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -14,8 +15,10 @@
/*-*************************************
* Dependencies
***************************************/
+#include "../common/allocations.h" /* ZSTD_customMalloc, ZSTD_customFree */
#include "../common/zstd_internal.h"
-
+#include "../common/portability_macros.h"
+#include "../common/compiler.h" /* ZSTD_isPower2 */
/*-*************************************
* Constants
@@ -41,8 +44,9 @@
***************************************/
typedef enum {
ZSTD_cwksp_alloc_objects,
- ZSTD_cwksp_alloc_buffers,
- ZSTD_cwksp_alloc_aligned
+ ZSTD_cwksp_alloc_aligned_init_once,
+ ZSTD_cwksp_alloc_aligned,
+ ZSTD_cwksp_alloc_buffers
} ZSTD_cwksp_alloc_phase_e;
/*
@@ -95,8 +99,8 @@ typedef enum {
*
* Workspace Layout:
*
- * [ ... workspace ... ]
- * [objects][tables ... ->] free space [<- ... aligned][<- ... buffers]
+ * [ ... workspace ... ]
+ * [objects][tables ->] free space [<- buffers][<- aligned][<- init once]
*
* The various objects that live in the workspace are divided into the
* following categories, and are allocated separately:
@@ -120,9 +124,18 @@ typedef enum {
* uint32_t arrays, all of whose values are between 0 and (nextSrc - base).
* Their sizes depend on the cparams. These tables are 64-byte aligned.
*
- * - Aligned: these buffers are used for various purposes that require 4 byte
- * alignment, but don't require any initialization before they're used. These
- * buffers are each aligned to 64 bytes.
+ * - Init once: these buffers must be initialized at least once before
+ * use. They should be used when we want to skip memory initialization
+ * while not triggering memory checkers (like Valgrind) when reading
+ * from this memory without writing to it first.
+ * These buffers should be used carefully as they might contain data
+ * from previous compressions.
+ * Buffers are aligned to 64 bytes.
+ *
+ * - Aligned: these buffers don't require any initialization before they're
+ * used. The user of the buffer should make sure they write into a buffer
+ * location before reading from it.
+ * Buffers are aligned to 64 bytes.
*
* - Buffers: these buffers are used for various purposes that don't require
* any alignment or initialization before they're used. This means they can
@@ -134,8 +147,9 @@ typedef enum {
* correctly packed into the workspace buffer. That order is:
*
* 1. Objects
- * 2. Buffers
- * 3. Aligned/Tables
+ * 2. Init once / Tables
+ * 3. Aligned / Tables
+ * 4. Buffers / Tables
*
* Attempts to reserve objects of different types out of order will fail.
*/
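
The layout described above is a single arena filled from both ends: objects and tables are carved from the front, while init-once, aligned and plain buffers are carved from the back, and a phase counter rejects out-of-order reservations. The toy below illustrates only the two-ended bump-allocation idea; it deliberately ignores the real phase rules, 64-byte alignment and ASAN handling, and is not part of the patch.

#include <stdio.h>
#include <stdint.h>

/* Toy two-ended bump allocator: "tables" grow from the front,
 * "buffers" grow from the back, and the two must never cross. */
typedef struct {
    uint8_t* base;
    uint8_t* front;   /* next front allocation (objects/tables)  */
    uint8_t* back;    /* start of the most recent back allocation */
    uint8_t* end;
} ToyWksp;

static void toy_init(ToyWksp* ws, void* mem, size_t size)
{
    ws->base = ws->front = (uint8_t*)mem;
    ws->back = ws->end = (uint8_t*)mem + size;
}

static void* toy_reserve_front(ToyWksp* ws, size_t bytes)
{
    if ((size_t)(ws->back - ws->front) < bytes) return NULL;
    { void* const p = ws->front; ws->front += bytes; return p; }
}

static void* toy_reserve_back(ToyWksp* ws, size_t bytes)
{
    if ((size_t)(ws->back - ws->front) < bytes) return NULL;
    ws->back -= bytes;
    return ws->back;
}

int main(void)
{
    static uint8_t arena[1024];
    ToyWksp ws;
    toy_init(&ws, arena, sizeof(arena));
    printf("table  @ offset %td\n", (uint8_t*)toy_reserve_front(&ws, 256) - arena);
    printf("buffer @ offset %td\n", (uint8_t*)toy_reserve_back(&ws, 128) - arena);
    printf("free space: %td bytes\n", ws.back - ws.front);
    return 0;
}
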
@@ -147,6 +161,7 @@ typedef struct {
void* tableEnd;
void* tableValidEnd;
void* allocStart;
+ void* initOnceStart;
BYTE allocFailed;
int workspaceOversizedDuration;
@@ -159,6 +174,7 @@ typedef struct {
***************************************/
MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws);
+MEM_STATIC void* ZSTD_cwksp_initialAllocStart(ZSTD_cwksp* ws);
MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) {
(void)ws;
@@ -168,14 +184,16 @@ MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) {
assert(ws->tableEnd <= ws->allocStart);
assert(ws->tableValidEnd <= ws->allocStart);
assert(ws->allocStart <= ws->workspaceEnd);
+ assert(ws->initOnceStart <= ZSTD_cwksp_initialAllocStart(ws));
+ assert(ws->workspace <= ws->initOnceStart);
}
/*
* Align must be a power of 2.
*/
-MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {
+MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t align) {
size_t const mask = align - 1;
- assert((align & mask) == 0);
+ assert(ZSTD_isPower2(align));
return (size + mask) & ~mask;
}
@@ -189,7 +207,7 @@ MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {
* to figure out how much space you need for the matchState tables. Everything
* else is though.
*
- * Do not use for sizing aligned buffers. Instead, use ZSTD_cwksp_aligned_alloc_size().
+ * Do not use for sizing aligned buffers. Instead, use ZSTD_cwksp_aligned64_alloc_size().
*/
MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
if (size == 0)
@@ -197,12 +215,16 @@ MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
return size;
}
+MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size, size_t alignment) {
+ return ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(size, alignment));
+}
+
/*
* Returns an adjusted alloc size that is the nearest larger multiple of 64 bytes.
* Used to determine the number of bytes required for a given "aligned".
*/
-MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size) {
- return ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(size, ZSTD_CWKSP_ALIGNMENT_BYTES));
+MEM_STATIC size_t ZSTD_cwksp_aligned64_alloc_size(size_t size) {
+ return ZSTD_cwksp_aligned_alloc_size(size, ZSTD_CWKSP_ALIGNMENT_BYTES);
}
/*
@@ -210,14 +232,10 @@ MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size) {
* for internal purposes (currently only alignment).
*/
MEM_STATIC size_t ZSTD_cwksp_slack_space_required(void) {
- /* For alignment, the wksp will always allocate an additional n_1=[1, 64] bytes
- * to align the beginning of tables section, as well as another n_2=[0, 63] bytes
- * to align the beginning of the aligned section.
- *
- * n_1 + n_2 == 64 bytes if the cwksp is freshly allocated, due to tables and
- * aligneds being sized in multiples of 64 bytes.
+ /* For alignment, the wksp will always allocate an additional 2*ZSTD_CWKSP_ALIGNMENT_BYTES
+ * bytes to align the beginning of the tables section and the end of the buffers.
*/
- size_t const slackSpace = ZSTD_CWKSP_ALIGNMENT_BYTES;
+ size_t const slackSpace = ZSTD_CWKSP_ALIGNMENT_BYTES * 2;
return slackSpace;
}
@@ -229,12 +247,24 @@ MEM_STATIC size_t ZSTD_cwksp_slack_space_required(void) {
MEM_STATIC size_t ZSTD_cwksp_bytes_to_align_ptr(void* ptr, const size_t alignBytes) {
size_t const alignBytesMask = alignBytes - 1;
size_t const bytes = (alignBytes - ((size_t)ptr & (alignBytesMask))) & alignBytesMask;
- assert((alignBytes & alignBytesMask) == 0);
- assert(bytes != ZSTD_CWKSP_ALIGNMENT_BYTES);
+ assert(ZSTD_isPower2(alignBytes));
+ assert(bytes < alignBytes);
return bytes;
}
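
For intuition on the two helpers above: ZSTD_cwksp_align() rounds a size up to the next multiple of a power-of-two alignment, and ZSTD_cwksp_bytes_to_align_ptr() returns how many bytes must be skipped so a pointer lands on that alignment. A small standalone check of the same arithmetic, assuming a 64-byte alignment (illustrative only, not part of the patch):

#include <assert.h>
#include <stdio.h>
#include <stddef.h>

#define ALIGN_DEMO 64  /* stands in for ZSTD_CWKSP_ALIGNMENT_BYTES */

static size_t align_up(size_t size, size_t align)   /* same formula as ZSTD_cwksp_align */
{
    size_t const mask = align - 1;
    return (size + mask) & ~mask;
}

static size_t bytes_to_align(const void* ptr, size_t align)  /* same formula as ZSTD_cwksp_bytes_to_align_ptr */
{
    size_t const mask = align - 1;
    return (align - ((size_t)ptr & mask)) & mask;
}

int main(void)
{
    char buf[256];
    const char* const p = buf + 10;            /* deliberately offset address */
    assert(align_up(100, ALIGN_DEMO) == 128);  /* 100 rounds up to 2*64 */
    assert(align_up(128, ALIGN_DEMO) == 128);  /* exact multiples are unchanged */
    assert(((size_t)(p + bytes_to_align(p, ALIGN_DEMO)) & (ALIGN_DEMO - 1)) == 0);
    printf("skip %zu bytes to align %p\n", bytes_to_align(p, ALIGN_DEMO), (const void*)p);
    return 0;
}
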
/*
+ * Returns the initial value for allocStart which is used to determine the position from
+ * which we can allocate from the end of the workspace.
+ */
+MEM_STATIC void* ZSTD_cwksp_initialAllocStart(ZSTD_cwksp* ws)
+{
+ char* endPtr = (char*)ws->workspaceEnd;
+ assert(ZSTD_isPower2(ZSTD_CWKSP_ALIGNMENT_BYTES));
+ endPtr = endPtr - ((size_t)endPtr % ZSTD_CWKSP_ALIGNMENT_BYTES);
+ return (void*)endPtr;
+}
+
+/*
* Internal function. Do not use directly.
* Reserves the given number of bytes within the aligned/buffer segment of the wksp,
* which counts from the end of the wksp (as opposed to the object/table segment).
@@ -246,7 +276,7 @@ ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t const bytes)
{
void* const alloc = (BYTE*)ws->allocStart - bytes;
void* const bottom = ws->tableEnd;
- DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
+ DEBUGLOG(5, "cwksp: reserving [0x%p]:%zd bytes; %zd bytes remaining",
alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
ZSTD_cwksp_assert_internal_consistency(ws);
assert(alloc >= bottom);
@@ -274,27 +304,16 @@ ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase
{
assert(phase >= ws->phase);
if (phase > ws->phase) {
- /* Going from allocating objects to allocating buffers */
- if (ws->phase < ZSTD_cwksp_alloc_buffers &&
- phase >= ZSTD_cwksp_alloc_buffers) {
+ /* Going from allocating objects to allocating initOnce / tables */
+ if (ws->phase < ZSTD_cwksp_alloc_aligned_init_once &&
+ phase >= ZSTD_cwksp_alloc_aligned_init_once) {
ws->tableValidEnd = ws->objectEnd;
- }
+ ws->initOnceStart = ZSTD_cwksp_initialAllocStart(ws);
- /* Going from allocating buffers to allocating aligneds/tables */
- if (ws->phase < ZSTD_cwksp_alloc_aligned &&
- phase >= ZSTD_cwksp_alloc_aligned) {
- { /* Align the start of the "aligned" to 64 bytes. Use [1, 64] bytes. */
- size_t const bytesToAlign =
- ZSTD_CWKSP_ALIGNMENT_BYTES - ZSTD_cwksp_bytes_to_align_ptr(ws->allocStart, ZSTD_CWKSP_ALIGNMENT_BYTES);
- DEBUGLOG(5, "reserving aligned alignment addtl space: %zu", bytesToAlign);
- ZSTD_STATIC_ASSERT((ZSTD_CWKSP_ALIGNMENT_BYTES & (ZSTD_CWKSP_ALIGNMENT_BYTES - 1)) == 0); /* power of 2 */
- RETURN_ERROR_IF(!ZSTD_cwksp_reserve_internal_buffer_space(ws, bytesToAlign),
- memory_allocation, "aligned phase - alignment initial allocation failed!");
- }
{ /* Align the start of the tables to 64 bytes. Use [0, 63] bytes */
- void* const alloc = ws->objectEnd;
+ void* const alloc = ws->objectEnd;
size_t const bytesToAlign = ZSTD_cwksp_bytes_to_align_ptr(alloc, ZSTD_CWKSP_ALIGNMENT_BYTES);
- void* const objectEnd = (BYTE*)alloc + bytesToAlign;
+ void* const objectEnd = (BYTE*)alloc + bytesToAlign;
DEBUGLOG(5, "reserving table alignment addtl space: %zu", bytesToAlign);
RETURN_ERROR_IF(objectEnd > ws->workspaceEnd, memory_allocation,
"table phase - alignment initial allocation failed!");
@@ -302,7 +321,9 @@ ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase
ws->tableEnd = objectEnd; /* table area starts being empty */
if (ws->tableValidEnd < ws->tableEnd) {
ws->tableValidEnd = ws->tableEnd;
- } } }
+ }
+ }
+ }
ws->phase = phase;
ZSTD_cwksp_assert_internal_consistency(ws);
}
@@ -314,7 +335,7 @@ ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase
*/
MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr)
{
- return (ptr != NULL) && (ws->workspace <= ptr) && (ptr <= ws->workspaceEnd);
+ return (ptr != NULL) && (ws->workspace <= ptr) && (ptr < ws->workspaceEnd);
}
/*
@@ -345,29 +366,61 @@ MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes)
/*
* Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
+ * This memory has been initialized at least once in the past.
+ * This doesn't mean it has been initialized this time, and it might contain data from previous
+ * operations.
+ * The main usage is for algorithms that might need read access into uninitialized memory.
+ * The algorithm must maintain safety under these conditions and must make sure it doesn't
+ * leak any of the past data (directly or in side channels).
*/
-MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes)
+MEM_STATIC void* ZSTD_cwksp_reserve_aligned_init_once(ZSTD_cwksp* ws, size_t bytes)
{
- void* ptr = ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES),
- ZSTD_cwksp_alloc_aligned);
- assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0);
+ size_t const alignedBytes = ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES);
+ void* ptr = ZSTD_cwksp_reserve_internal(ws, alignedBytes, ZSTD_cwksp_alloc_aligned_init_once);
+ assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
+ if (ptr && ptr < ws->initOnceStart) {
+ /* We assume the memory following the current allocation is either:
+ * 1. Not usable as initOnce memory (end of workspace)
+ * 2. Another initOnce buffer that has been allocated before (and so was previously memset)
+ * 3. An ASAN redzone, in which case we don't want to write on it
+ * For these reasons it should be fine to not explicitly zero every byte up to ws->initOnceStart.
+ * Note that we assume here that MSAN and ASAN cannot run at the same time. */
+ ZSTD_memset(ptr, 0, MIN((size_t)((U8*)ws->initOnceStart - (U8*)ptr), alignedBytes));
+ ws->initOnceStart = ptr;
+ }
+ return ptr;
+}
+
+/*
+ * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
+ */
+MEM_STATIC void* ZSTD_cwksp_reserve_aligned64(ZSTD_cwksp* ws, size_t bytes)
+{
+ void* const ptr = ZSTD_cwksp_reserve_internal(ws,
+ ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES),
+ ZSTD_cwksp_alloc_aligned);
+ assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
return ptr;
}
/*
* Aligned on 64 bytes. These buffers have the special property that
- * their values remain constrained, allowing us to re-use them without
+ * their values remain constrained, allowing us to reuse them without
* memset()-ing them.
*/
MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes)
{
- const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned;
+ const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned_init_once;
void* alloc;
void* end;
void* top;
- if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase))) {
- return NULL;
+ /* We can only start allocating tables after we are done reserving space for objects at the
+ * start of the workspace */
+ if (ws->phase < phase) {
+ if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase))) {
+ return NULL;
+ }
}
alloc = ws->tableEnd;
end = (BYTE *)alloc + bytes;
@@ -387,7 +440,7 @@ MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes)
assert((bytes & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
- assert(((size_t)alloc & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0);
+ assert(((size_t)alloc & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
return alloc;
}
@@ -421,6 +474,20 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes)
return alloc;
}
+/*
+ * Same as ZSTD_cwksp_reserve_object(), but with alignment control.
+ * Note: should happen only once, at the workspace's first initialization.
+ */
+MEM_STATIC void* ZSTD_cwksp_reserve_object_aligned(ZSTD_cwksp* ws, size_t byteSize, size_t alignment)
+{
+ size_t const mask = alignment - 1;
+ size_t const surplus = (alignment > sizeof(void*)) ? alignment - sizeof(void*) : 0;
+ void* const start = ZSTD_cwksp_reserve_object(ws, byteSize + surplus);
+ if (start == NULL) return NULL;
+ if (surplus == 0) return start;
+ assert(ZSTD_isPower2(alignment));
+ return (void*)(((size_t)start + surplus) & ~mask);
+}
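
ZSTD_cwksp_reserve_object_aligned() obtains an aligned object from an allocator that only guarantees pointer-size alignment by over-allocating alignment - sizeof(void*) surplus bytes and rounding the returned address up within that surplus. The snippet below shows the same trick in isolation, with malloc standing in for the workspace reservation; it is purely illustrative, the real code never calls malloc here.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

static void* alloc_aligned_demo(size_t byteSize, size_t alignment, void** toFree)
{
    uintptr_t const mask = (uintptr_t)alignment - 1;   /* alignment must be a power of two */
    size_t const surplus = (alignment > sizeof(void*)) ? alignment - sizeof(void*) : 0;
    void* const raw = malloc(byteSize + surplus);      /* malloc guarantees >= pointer-size alignment */
    *toFree = raw;
    if (raw == NULL) return NULL;
    if (surplus == 0) return raw;
    /* same rounding as the helper above: add the surplus, then mask down */
    return (void*)(((uintptr_t)raw + surplus) & ~mask);
}

int main(void)
{
    void* raw;
    void* const p = alloc_aligned_demo(100, 64, &raw);
    printf("aligned pointer %p (mod 64 = %zu)\n", p, (size_t)((uintptr_t)p & 63));
    free(raw);
    return 0;
}
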
MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws)
{
@@ -451,7 +518,7 @@ MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
assert(ws->tableValidEnd >= ws->objectEnd);
assert(ws->tableValidEnd <= ws->allocStart);
if (ws->tableValidEnd < ws->tableEnd) {
- ZSTD_memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd);
+ ZSTD_memset(ws->tableValidEnd, 0, (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd));
}
ZSTD_cwksp_mark_tables_clean(ws);
}
@@ -460,7 +527,8 @@ MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
* Invalidates table allocations.
* All other allocations remain valid.
*/
-MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
+MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws)
+{
DEBUGLOG(4, "cwksp: clearing tables!");
@@ -478,14 +546,23 @@ MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
ws->tableEnd = ws->objectEnd;
- ws->allocStart = ws->workspaceEnd;
+ ws->allocStart = ZSTD_cwksp_initialAllocStart(ws);
ws->allocFailed = 0;
- if (ws->phase > ZSTD_cwksp_alloc_buffers) {
- ws->phase = ZSTD_cwksp_alloc_buffers;
+ if (ws->phase > ZSTD_cwksp_alloc_aligned_init_once) {
+ ws->phase = ZSTD_cwksp_alloc_aligned_init_once;
}
ZSTD_cwksp_assert_internal_consistency(ws);
}
+MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
+ return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);
+}
+
+MEM_STATIC size_t ZSTD_cwksp_used(const ZSTD_cwksp* ws) {
+ return (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->workspace)
+ + (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->allocStart);
+}
+
/*
* The provided workspace takes ownership of the buffer [start, start+size).
* Any existing values in the workspace are ignored (the previously managed
@@ -498,6 +575,7 @@ MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, ZSTD_c
ws->workspaceEnd = (BYTE*)start + size;
ws->objectEnd = ws->workspace;
ws->tableValidEnd = ws->objectEnd;
+ ws->initOnceStart = ZSTD_cwksp_initialAllocStart(ws);
ws->phase = ZSTD_cwksp_alloc_objects;
ws->isStatic = isStatic;
ZSTD_cwksp_clear(ws);
@@ -529,15 +607,6 @@ MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) {
ZSTD_memset(src, 0, sizeof(ZSTD_cwksp));
}
-MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
- return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);
-}
-
-MEM_STATIC size_t ZSTD_cwksp_used(const ZSTD_cwksp* ws) {
- return (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->workspace)
- + (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->allocStart);
-}
-
MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
return ws->allocFailed;
}
@@ -550,17 +619,11 @@ MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
* Returns if the estimated space needed for a wksp is within an acceptable limit of the
* actual amount of space used.
*/
-MEM_STATIC int ZSTD_cwksp_estimated_space_within_bounds(const ZSTD_cwksp* const ws,
- size_t const estimatedSpace, int resizedWorkspace) {
- if (resizedWorkspace) {
- /* Resized/newly allocated wksp should have exact bounds */
- return ZSTD_cwksp_used(ws) == estimatedSpace;
- } else {
- /* Due to alignment, when reusing a workspace, we can actually consume 63 fewer or more bytes
- * than estimatedSpace. See the comments in zstd_cwksp.h for details.
- */
- return (ZSTD_cwksp_used(ws) >= estimatedSpace - 63) && (ZSTD_cwksp_used(ws) <= estimatedSpace + 63);
- }
+MEM_STATIC int ZSTD_cwksp_estimated_space_within_bounds(const ZSTD_cwksp *const ws, size_t const estimatedSpace) {
+ /* We have an alignment space between objects and tables, and between tables and buffers, so we can have up to twice
+ * the alignment bytes of difference between the estimation and the actual usage */
+ return (estimatedSpace - ZSTD_cwksp_slack_space_required()) <= ZSTD_cwksp_used(ws) &&
+ ZSTD_cwksp_used(ws) <= estimatedSpace;
}
@@ -591,5 +654,4 @@ MEM_STATIC void ZSTD_cwksp_bump_oversized_duration(
}
}
-
#endif /* ZSTD_CWKSP_H */
diff --git a/lib/zstd/compress/zstd_double_fast.c b/lib/zstd/compress/zstd_double_fast.c
index 76933dea2624..995e83f3a183 100644
--- a/lib/zstd/compress/zstd_double_fast.c
+++ b/lib/zstd/compress/zstd_double_fast.c
@@ -1,5 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -11,8 +12,49 @@
#include "zstd_compress_internal.h"
#include "zstd_double_fast.h"
+#ifndef ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR
-void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
+static
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+void ZSTD_fillDoubleHashTableForCDict(ZSTD_MatchState_t* ms,
+ void const* end, ZSTD_dictTableLoadMethod_e dtlm)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const hashLarge = ms->hashTable;
+ U32 const hBitsL = cParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS;
+ U32 const mls = cParams->minMatch;
+ U32* const hashSmall = ms->chainTable;
+ U32 const hBitsS = cParams->chainLog + ZSTD_SHORT_CACHE_TAG_BITS;
+ const BYTE* const base = ms->window.base;
+ const BYTE* ip = base + ms->nextToUpdate;
+ const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
+ const U32 fastHashFillStep = 3;
+
+ /* Always insert every fastHashFillStep position into the hash tables.
+ * Insert the other positions into the large hash table if their entry
+ * is empty.
+ */
+ for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) {
+ U32 const curr = (U32)(ip - base);
+ U32 i;
+ for (i = 0; i < fastHashFillStep; ++i) {
+ size_t const smHashAndTag = ZSTD_hashPtr(ip + i, hBitsS, mls);
+ size_t const lgHashAndTag = ZSTD_hashPtr(ip + i, hBitsL, 8);
+ if (i == 0) {
+ ZSTD_writeTaggedIndex(hashSmall, smHashAndTag, curr + i);
+ }
+ if (i == 0 || hashLarge[lgHashAndTag >> ZSTD_SHORT_CACHE_TAG_BITS] == 0) {
+ ZSTD_writeTaggedIndex(hashLarge, lgHashAndTag, curr + i);
+ }
+ /* Only load extra positions for ZSTD_dtlm_full */
+ if (dtlm == ZSTD_dtlm_fast)
+ break;
+ } }
+}
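
The CDict fill path above stores tagged indices: each hash-table entry packs a position index together with the low bits of the hash, so a later lookup can reject most non-matching slots without touching dictionary memory. The helpers below are an illustrative reconstruction of that packing; ZSTD_SHORT_CACHE_TAG_BITS, ZSTD_writeTaggedIndex() and ZSTD_comparePackedTags() are defined in zstd_compress_internal.h and may differ in detail (an 8-bit tag is assumed here).

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TAG_BITS 8u                       /* assumed value of ZSTD_SHORT_CACHE_TAG_BITS */
#define TAG_MASK ((1u << TAG_BITS) - 1)

/* Store `index` in the upper bits and the low TAG_BITS of the hash in the lower bits. */
static void write_tagged_index(uint32_t* table, size_t hashAndTag, uint32_t index)
{
    uint32_t const tag = (uint32_t)hashAndTag & TAG_MASK;
    table[hashAndTag >> TAG_BITS] = (index << TAG_BITS) | tag;
}

/* Compare only the tag parts of a packed entry and a freshly computed hash. */
static int compare_packed_tags(uint32_t packedEntry, size_t hashAndTag)
{
    return (packedEntry & TAG_MASK) == ((uint32_t)hashAndTag & TAG_MASK);
}

int main(void)
{
    uint32_t table[1u << 8] = {0};        /* 256-slot toy table */
    size_t const hashAndTag = 0x1A2Bu;    /* pretend hash: slot 0x1A, tag 0x2B */
    write_tagged_index(table, hashAndTag, 12345);
    assert(compare_packed_tags(table[hashAndTag >> TAG_BITS], hashAndTag));
    printf("slot %zu holds index %u, tag 0x%02x\n",
           hashAndTag >> TAG_BITS,
           table[hashAndTag >> TAG_BITS] >> TAG_BITS,
           table[hashAndTag >> TAG_BITS] & TAG_MASK);
    return 0;
}
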
+
+static
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+void ZSTD_fillDoubleHashTableForCCtx(ZSTD_MatchState_t* ms,
void const* end, ZSTD_dictTableLoadMethod_e dtlm)
{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
@@ -43,13 +85,26 @@ void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
/* Only load extra positions for ZSTD_dtlm_full */
if (dtlm == ZSTD_dtlm_fast)
break;
- } }
+ } }
+}
+
+void ZSTD_fillDoubleHashTable(ZSTD_MatchState_t* ms,
+ const void* const end,
+ ZSTD_dictTableLoadMethod_e dtlm,
+ ZSTD_tableFillPurpose_e tfp)
+{
+ if (tfp == ZSTD_tfp_forCDict) {
+ ZSTD_fillDoubleHashTableForCDict(ms, end, dtlm);
+ } else {
+ ZSTD_fillDoubleHashTableForCCtx(ms, end, dtlm);
+ }
}
FORCE_INLINE_TEMPLATE
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_compressBlock_doubleFast_noDict_generic(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize, U32 const mls /* template */)
{
ZSTD_compressionParameters const* cParams = &ms->cParams;
@@ -67,7 +122,7 @@ size_t ZSTD_compressBlock_doubleFast_noDict_generic(
const BYTE* const iend = istart + srcSize;
const BYTE* const ilimit = iend - HASH_READ_SIZE;
U32 offset_1=rep[0], offset_2=rep[1];
- U32 offsetSaved = 0;
+ U32 offsetSaved1 = 0, offsetSaved2 = 0;
size_t mLength;
U32 offset;
@@ -88,9 +143,14 @@ size_t ZSTD_compressBlock_doubleFast_noDict_generic(
const BYTE* matchl0; /* the long match for ip */
const BYTE* matchs0; /* the short match for ip */
const BYTE* matchl1; /* the long match for ip1 */
+ const BYTE* matchs0_safe; /* matchs0 or safe address */
const BYTE* ip = istart; /* the current position */
const BYTE* ip1; /* the next position */
+ /* Array of ~random data; should have a low probability of matching real data.
+ * We load from here instead of from the tables when matchl0/matchl1 are
+ * invalid indices. Used to avoid unpredictable branches. */
+ const BYTE dummy[] = {0x12,0x34,0x56,0x78,0x9a,0xbc,0xde,0xf0,0xe2,0xb4};
DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_noDict_generic");
@@ -100,8 +160,8 @@ size_t ZSTD_compressBlock_doubleFast_noDict_generic(
U32 const current = (U32)(ip - base);
U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, current, cParams->windowLog);
U32 const maxRep = current - windowLow;
- if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
- if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
+ if (offset_2 > maxRep) offsetSaved2 = offset_2, offset_2 = 0;
+ if (offset_1 > maxRep) offsetSaved1 = offset_1, offset_1 = 0;
}
/* Outer Loop: one iteration per match found and stored */
@@ -131,30 +191,35 @@ size_t ZSTD_compressBlock_doubleFast_noDict_generic(
if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) {
mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
ip++;
- ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, mLength);
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, REPCODE1_TO_OFFBASE, mLength);
goto _match_stored;
}
hl1 = ZSTD_hashPtr(ip1, hBitsL, 8);
- if (idxl0 > prefixLowestIndex) {
+ /* idxl0 > prefixLowestIndex is a (somewhat) unpredictable branch.
+ * However, the expression below compiles into a conditional move. Since
+ * a match is unlikely and we only *branch* on idxl0 > prefixLowestIndex
+ * if there is a match, all branches become predictable. */
+ { const BYTE* const matchl0_safe = ZSTD_selectAddr(idxl0, prefixLowestIndex, matchl0, &dummy[0]);
+
/* check prefix long match */
- if (MEM_read64(matchl0) == MEM_read64(ip)) {
+ if (MEM_read64(matchl0_safe) == MEM_read64(ip) && matchl0_safe == matchl0) {
mLength = ZSTD_count(ip+8, matchl0+8, iend) + 8;
offset = (U32)(ip-matchl0);
while (((ip>anchor) & (matchl0>prefixLowest)) && (ip[-1] == matchl0[-1])) { ip--; matchl0--; mLength++; } /* catch up */
goto _match_found;
- }
- }
+ } }
idxl1 = hashLong[hl1];
matchl1 = base + idxl1;
- if (idxs0 > prefixLowestIndex) {
- /* check prefix short match */
- if (MEM_read32(matchs0) == MEM_read32(ip)) {
- goto _search_next_long;
- }
+ /* Same optimization as matchl0 above */
+ matchs0_safe = ZSTD_selectAddr(idxs0, prefixLowestIndex, matchs0, &dummy[0]);
+
+ /* check prefix short match */
+ if (MEM_read32(matchs0_safe) == MEM_read32(ip) && matchs0_safe == matchs0) {
+ goto _search_next_long;
}
if (ip1 >= nextStep) {
@@ -175,30 +240,36 @@ size_t ZSTD_compressBlock_doubleFast_noDict_generic(
} while (ip1 <= ilimit);
_cleanup:
+ /* If offset_1 started invalid (offsetSaved1 != 0) and became valid (offset_1 != 0),
+ * rotate saved offsets. See comment in ZSTD_compressBlock_fast_noDict for more context. */
+ offsetSaved2 = ((offsetSaved1 != 0) && (offset_1 != 0)) ? offsetSaved1 : offsetSaved2;
+
/* save reps for next block */
- rep[0] = offset_1 ? offset_1 : offsetSaved;
- rep[1] = offset_2 ? offset_2 : offsetSaved;
+ rep[0] = offset_1 ? offset_1 : offsetSaved1;
+ rep[1] = offset_2 ? offset_2 : offsetSaved2;
/* Return the last literals size */
return (size_t)(iend - anchor);
_search_next_long:
- /* check prefix long +1 match */
- if (idxl1 > prefixLowestIndex) {
- if (MEM_read64(matchl1) == MEM_read64(ip1)) {
+ /* short match found: let's check for a longer one */
+ mLength = ZSTD_count(ip+4, matchs0+4, iend) + 4;
+ offset = (U32)(ip - matchs0);
+
+ /* check long match at +1 position */
+ if ((idxl1 > prefixLowestIndex) && (MEM_read64(matchl1) == MEM_read64(ip1))) {
+ size_t const l1len = ZSTD_count(ip1+8, matchl1+8, iend) + 8;
+ if (l1len > mLength) {
+ /* use the long match instead */
ip = ip1;
- mLength = ZSTD_count(ip+8, matchl1+8, iend) + 8;
+ mLength = l1len;
offset = (U32)(ip-matchl1);
- while (((ip>anchor) & (matchl1>prefixLowest)) && (ip[-1] == matchl1[-1])) { ip--; matchl1--; mLength++; } /* catch up */
- goto _match_found;
+ matchs0 = matchl1;
}
}
- /* if no long +1 match, explore the short match we found */
- mLength = ZSTD_count(ip+4, matchs0+4, iend) + 4;
- offset = (U32)(ip - matchs0);
- while (((ip>anchor) & (matchs0>prefixLowest)) && (ip[-1] == matchs0[-1])) { ip--; matchs0--; mLength++; } /* catch up */
+ while (((ip>anchor) & (matchs0>prefixLowest)) && (ip[-1] == matchs0[-1])) { ip--; matchs0--; mLength++; } /* complete backward */
/* fall-through */
@@ -217,7 +288,7 @@ _match_found: /* requires ip, offset, mLength */
hashLong[hl1] = (U32)(ip1 - base);
}
- ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength);
_match_stored:
/* match found */
@@ -243,7 +314,7 @@ _match_stored:
U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; /* swap offset_2 <=> offset_1 */
hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base);
hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base);
- ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, rLength);
+ ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, rLength);
ip += rLength;
anchor = ip;
continue; /* faster when present ... (?) */
@@ -254,8 +325,9 @@ _match_stored:
FORCE_INLINE_TEMPLATE
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize,
U32 const mls /* template */)
{
@@ -275,9 +347,8 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
const BYTE* const iend = istart + srcSize;
const BYTE* const ilimit = iend - HASH_READ_SIZE;
U32 offset_1=rep[0], offset_2=rep[1];
- U32 offsetSaved = 0;
- const ZSTD_matchState_t* const dms = ms->dictMatchState;
+ const ZSTD_MatchState_t* const dms = ms->dictMatchState;
const ZSTD_compressionParameters* const dictCParams = &dms->cParams;
const U32* const dictHashLong = dms->hashTable;
const U32* const dictHashSmall = dms->chainTable;
@@ -286,8 +357,8 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
const BYTE* const dictStart = dictBase + dictStartIndex;
const BYTE* const dictEnd = dms->window.nextSrc;
const U32 dictIndexDelta = prefixLowestIndex - (U32)(dictEnd - dictBase);
- const U32 dictHBitsL = dictCParams->hashLog;
- const U32 dictHBitsS = dictCParams->chainLog;
+ const U32 dictHBitsL = dictCParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS;
+ const U32 dictHBitsS = dictCParams->chainLog + ZSTD_SHORT_CACHE_TAG_BITS;
const U32 dictAndPrefixLength = (U32)((ip - prefixLowest) + (dictEnd - dictStart));
DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_dictMatchState_generic");
@@ -295,6 +366,13 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
/* if a dictionary is attached, it must be within window range */
assert(ms->window.dictLimit + (1U << cParams->windowLog) >= endIndex);
+ if (ms->prefetchCDictTables) {
+ size_t const hashTableBytes = (((size_t)1) << dictCParams->hashLog) * sizeof(U32);
+ size_t const chainTableBytes = (((size_t)1) << dictCParams->chainLog) * sizeof(U32);
+ PREFETCH_AREA(dictHashLong, hashTableBytes);
+ PREFETCH_AREA(dictHashSmall, chainTableBytes);
+ }
+
/* init */
ip += (dictAndPrefixLength == 0);
@@ -309,8 +387,12 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
U32 offset;
size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8);
size_t const h = ZSTD_hashPtr(ip, hBitsS, mls);
- size_t const dictHL = ZSTD_hashPtr(ip, dictHBitsL, 8);
- size_t const dictHS = ZSTD_hashPtr(ip, dictHBitsS, mls);
+ size_t const dictHashAndTagL = ZSTD_hashPtr(ip, dictHBitsL, 8);
+ size_t const dictHashAndTagS = ZSTD_hashPtr(ip, dictHBitsS, mls);
+ U32 const dictMatchIndexAndTagL = dictHashLong[dictHashAndTagL >> ZSTD_SHORT_CACHE_TAG_BITS];
+ U32 const dictMatchIndexAndTagS = dictHashSmall[dictHashAndTagS >> ZSTD_SHORT_CACHE_TAG_BITS];
+ int const dictTagsMatchL = ZSTD_comparePackedTags(dictMatchIndexAndTagL, dictHashAndTagL);
+ int const dictTagsMatchS = ZSTD_comparePackedTags(dictMatchIndexAndTagS, dictHashAndTagS);
U32 const curr = (U32)(ip-base);
U32 const matchIndexL = hashLong[h2];
U32 matchIndexS = hashSmall[h];
@@ -323,26 +405,24 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
hashLong[h2] = hashSmall[h] = curr; /* update hash tables */
/* check repcode */
- if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
+ if ((ZSTD_index_overlap_check(prefixLowestIndex, repIndex))
&& (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
ip++;
- ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, mLength);
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, REPCODE1_TO_OFFBASE, mLength);
goto _match_stored;
}
- if (matchIndexL > prefixLowestIndex) {
+ if ((matchIndexL >= prefixLowestIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) {
/* check prefix long match */
- if (MEM_read64(matchLong) == MEM_read64(ip)) {
- mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8;
- offset = (U32)(ip-matchLong);
- while (((ip>anchor) & (matchLong>prefixLowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
- goto _match_found;
- }
- } else {
+ mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8;
+ offset = (U32)(ip-matchLong);
+ while (((ip>anchor) & (matchLong>prefixLowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
+ goto _match_found;
+ } else if (dictTagsMatchL) {
/* check dictMatchState long match */
- U32 const dictMatchIndexL = dictHashLong[dictHL];
+ U32 const dictMatchIndexL = dictMatchIndexAndTagL >> ZSTD_SHORT_CACHE_TAG_BITS;
const BYTE* dictMatchL = dictBase + dictMatchIndexL;
assert(dictMatchL < dictEnd);
@@ -354,13 +434,13 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
} }
if (matchIndexS > prefixLowestIndex) {
- /* check prefix short match */
+ /* short match candidate */
if (MEM_read32(match) == MEM_read32(ip)) {
goto _search_next_long;
}
- } else {
+ } else if (dictTagsMatchS) {
/* check dictMatchState short match */
- U32 const dictMatchIndexS = dictHashSmall[dictHS];
+ U32 const dictMatchIndexS = dictMatchIndexAndTagS >> ZSTD_SHORT_CACHE_TAG_BITS;
match = dictBase + dictMatchIndexS;
matchIndexS = dictMatchIndexS + dictIndexDelta;
@@ -375,25 +455,24 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
continue;
_search_next_long:
-
{ size_t const hl3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
- size_t const dictHLNext = ZSTD_hashPtr(ip+1, dictHBitsL, 8);
+ size_t const dictHashAndTagL3 = ZSTD_hashPtr(ip+1, dictHBitsL, 8);
U32 const matchIndexL3 = hashLong[hl3];
+ U32 const dictMatchIndexAndTagL3 = dictHashLong[dictHashAndTagL3 >> ZSTD_SHORT_CACHE_TAG_BITS];
+ int const dictTagsMatchL3 = ZSTD_comparePackedTags(dictMatchIndexAndTagL3, dictHashAndTagL3);
const BYTE* matchL3 = base + matchIndexL3;
hashLong[hl3] = curr + 1;
/* check prefix long +1 match */
- if (matchIndexL3 > prefixLowestIndex) {
- if (MEM_read64(matchL3) == MEM_read64(ip+1)) {
- mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8;
- ip++;
- offset = (U32)(ip-matchL3);
- while (((ip>anchor) & (matchL3>prefixLowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */
- goto _match_found;
- }
- } else {
+ if ((matchIndexL3 >= prefixLowestIndex) && (MEM_read64(matchL3) == MEM_read64(ip+1))) {
+ mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8;
+ ip++;
+ offset = (U32)(ip-matchL3);
+ while (((ip>anchor) & (matchL3>prefixLowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */
+ goto _match_found;
+ } else if (dictTagsMatchL3) {
/* check dict long +1 match */
- U32 const dictMatchIndexL3 = dictHashLong[dictHLNext];
+ U32 const dictMatchIndexL3 = dictMatchIndexAndTagL3 >> ZSTD_SHORT_CACHE_TAG_BITS;
const BYTE* dictMatchL3 = dictBase + dictMatchIndexL3;
assert(dictMatchL3 < dictEnd);
if (dictMatchL3 > dictStart && MEM_read64(dictMatchL3) == MEM_read64(ip+1)) {
@@ -419,7 +498,7 @@ _match_found:
offset_2 = offset_1;
offset_1 = offset;
- ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength);
_match_stored:
/* match found */
@@ -443,12 +522,12 @@ _match_stored:
const BYTE* repMatch2 = repIndex2 < prefixLowestIndex ?
dictBase + repIndex2 - dictIndexDelta :
base + repIndex2;
- if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
+ if ( (ZSTD_index_overlap_check(prefixLowestIndex, repIndex2))
&& (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
const BYTE* const repEnd2 = repIndex2 < prefixLowestIndex ? dictEnd : iend;
size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixLowest) + 4;
U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
- ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, repLength2);
+ ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, repLength2);
hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
ip += repLength2;
@@ -461,8 +540,8 @@ _match_stored:
} /* while (ip < ilimit) */
/* save reps for next block */
- rep[0] = offset_1 ? offset_1 : offsetSaved;
- rep[1] = offset_2 ? offset_2 : offsetSaved;
+ rep[0] = offset_1;
+ rep[1] = offset_2;
/* Return the last literals size */
return (size_t)(iend - anchor);
@@ -470,7 +549,7 @@ _match_stored:
#define ZSTD_GEN_DFAST_FN(dictMode, mls) \
static size_t ZSTD_compressBlock_doubleFast_##dictMode##_##mls( \
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], \
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], \
void const* src, size_t srcSize) \
{ \
return ZSTD_compressBlock_doubleFast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mls); \
@@ -488,7 +567,7 @@ ZSTD_GEN_DFAST_FN(dictMatchState, 7)
size_t ZSTD_compressBlock_doubleFast(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
const U32 mls = ms->cParams.minMatch;
@@ -508,7 +587,7 @@ size_t ZSTD_compressBlock_doubleFast(
size_t ZSTD_compressBlock_doubleFast_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
const U32 mls = ms->cParams.minMatch;
@@ -527,8 +606,10 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState(
}
-static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+static
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+size_t ZSTD_compressBlock_doubleFast_extDict_generic(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize,
U32 const mls /* template */)
{
@@ -579,13 +660,13 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
size_t mLength;
hashSmall[hSmall] = hashLong[hLong] = curr; /* update hash table */
- if ((((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex doesn't overlap dict + prefix */
+ if (((ZSTD_index_overlap_check(prefixStartIndex, repIndex))
& (offset_1 <= curr+1 - dictStartIndex)) /* note: we are searching at curr+1 */
&& (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
ip++;
- ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, mLength);
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, REPCODE1_TO_OFFBASE, mLength);
} else {
if ((matchLongIndex > dictStartIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) {
const BYTE* const matchEnd = matchLongIndex < prefixStartIndex ? dictEnd : iend;
@@ -596,7 +677,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
offset_2 = offset_1;
offset_1 = offset;
- ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength);
} else if ((matchIndex > dictStartIndex) && (MEM_read32(match) == MEM_read32(ip))) {
size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
@@ -621,7 +702,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
}
offset_2 = offset_1;
offset_1 = offset;
- ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength);
} else {
ip += ((ip-anchor) >> kSearchStrength) + 1;
@@ -647,13 +728,13 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
U32 const current2 = (U32)(ip-base);
U32 const repIndex2 = current2 - offset_2;
const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
- if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) /* intentional overflow : ensure repIndex2 doesn't overlap dict + prefix */
+ if ( ((ZSTD_index_overlap_check(prefixStartIndex, repIndex2))
& (offset_2 <= current2 - dictStartIndex))
&& (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
- ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, repLength2);
+ ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, repLength2);
hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
ip += repLength2;
@@ -677,7 +758,7 @@ ZSTD_GEN_DFAST_FN(extDict, 6)
ZSTD_GEN_DFAST_FN(extDict, 7)
size_t ZSTD_compressBlock_doubleFast_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
U32 const mls = ms->cParams.minMatch;
@@ -694,3 +775,5 @@ size_t ZSTD_compressBlock_doubleFast_extDict(
return ZSTD_compressBlock_doubleFast_extDict_7(ms, seqStore, rep, src, srcSize);
}
}
+
+#endif /* ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR */
diff --git a/lib/zstd/compress/zstd_double_fast.h b/lib/zstd/compress/zstd_double_fast.h
index 6822bde65a1d..011556ce56f7 100644
--- a/lib/zstd/compress/zstd_double_fast.h
+++ b/lib/zstd/compress/zstd_double_fast.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -11,22 +12,32 @@
#ifndef ZSTD_DOUBLE_FAST_H
#define ZSTD_DOUBLE_FAST_H
-
#include "../common/mem.h" /* U32 */
#include "zstd_compress_internal.h" /* ZSTD_CCtx, size_t */
-void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
- void const* end, ZSTD_dictTableLoadMethod_e dtlm);
+#ifndef ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR
+
+void ZSTD_fillDoubleHashTable(ZSTD_MatchState_t* ms,
+ void const* end, ZSTD_dictTableLoadMethod_e dtlm,
+ ZSTD_tableFillPurpose_e tfp);
+
size_t ZSTD_compressBlock_doubleFast(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_doubleFast_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_doubleFast_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-
+#define ZSTD_COMPRESSBLOCK_DOUBLEFAST ZSTD_compressBlock_doubleFast
+#define ZSTD_COMPRESSBLOCK_DOUBLEFAST_DICTMATCHSTATE ZSTD_compressBlock_doubleFast_dictMatchState
+#define ZSTD_COMPRESSBLOCK_DOUBLEFAST_EXTDICT ZSTD_compressBlock_doubleFast_extDict
+#else
+#define ZSTD_COMPRESSBLOCK_DOUBLEFAST NULL
+#define ZSTD_COMPRESSBLOCK_DOUBLEFAST_DICTMATCHSTATE NULL
+#define ZSTD_COMPRESSBLOCK_DOUBLEFAST_EXTDICT NULL
+#endif /* ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR */
#endif /* ZSTD_DOUBLE_FAST_H */
diff --git a/lib/zstd/compress/zstd_fast.c b/lib/zstd/compress/zstd_fast.c
index a752e6beab52..60e07e839e5f 100644
--- a/lib/zstd/compress/zstd_fast.c
+++ b/lib/zstd/compress/zstd_fast.c
@@ -1,5 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -11,8 +12,46 @@
#include "zstd_compress_internal.h" /* ZSTD_hashPtr, ZSTD_count, ZSTD_storeSeq */
#include "zstd_fast.h"
+static
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+void ZSTD_fillHashTableForCDict(ZSTD_MatchState_t* ms,
+ const void* const end,
+ ZSTD_dictTableLoadMethod_e dtlm)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const hashTable = ms->hashTable;
+ U32 const hBits = cParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS;
+ U32 const mls = cParams->minMatch;
+ const BYTE* const base = ms->window.base;
+ const BYTE* ip = base + ms->nextToUpdate;
+ const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
+ const U32 fastHashFillStep = 3;
+
+ /* Currently, we always use ZSTD_dtlm_full for filling CDict tables.
+ * Feel free to remove this assert if there's a good reason! */
+ assert(dtlm == ZSTD_dtlm_full);
+
+ /* Always insert every fastHashFillStep position into the hash table.
+ * Insert the other positions if their hash entry is empty.
+ */
+ for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) {
+ U32 const curr = (U32)(ip - base);
+ { size_t const hashAndTag = ZSTD_hashPtr(ip, hBits, mls);
+ ZSTD_writeTaggedIndex(hashTable, hashAndTag, curr); }
+
+ if (dtlm == ZSTD_dtlm_fast) continue;
+ /* Only load extra positions for ZSTD_dtlm_full */
+ { U32 p;
+ for (p = 1; p < fastHashFillStep; ++p) {
+ size_t const hashAndTag = ZSTD_hashPtr(ip + p, hBits, mls);
+ if (hashTable[hashAndTag >> ZSTD_SHORT_CACHE_TAG_BITS] == 0) { /* not yet filled */
+ ZSTD_writeTaggedIndex(hashTable, hashAndTag, curr + p);
+ } } } }
+}
-void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
+static
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+void ZSTD_fillHashTableForCCtx(ZSTD_MatchState_t* ms,
const void* const end,
ZSTD_dictTableLoadMethod_e dtlm)
{
@@ -25,6 +64,10 @@ void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
const U32 fastHashFillStep = 3;
+ /* Currently, we always use ZSTD_dtlm_fast for filling CCtx tables.
+ * Feel free to remove this assert if there's a good reason! */
+ assert(dtlm == ZSTD_dtlm_fast);
+
/* Always insert every fastHashFillStep position into the hash table.
* Insert the other positions if their hash entry is empty.
*/
@@ -42,6 +85,60 @@ void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
} } } }
}
+void ZSTD_fillHashTable(ZSTD_MatchState_t* ms,
+ const void* const end,
+ ZSTD_dictTableLoadMethod_e dtlm,
+ ZSTD_tableFillPurpose_e tfp)
+{
+ if (tfp == ZSTD_tfp_forCDict) {
+ ZSTD_fillHashTableForCDict(ms, end, dtlm);
+ } else {
+ ZSTD_fillHashTableForCCtx(ms, end, dtlm);
+ }
+}
+
+
+typedef int (*ZSTD_match4Found) (const BYTE* currentPtr, const BYTE* matchAddress, U32 matchIdx, U32 idxLowLimit);
+
+static int
+ZSTD_match4Found_cmov(const BYTE* currentPtr, const BYTE* matchAddress, U32 matchIdx, U32 idxLowLimit)
+{
+ /* Array of ~random data; should have a low probability of matching real data.
+ * Load from here if the index is invalid.
+ * Used to avoid unpredictable branches. */
+ static const BYTE dummy[] = {0x12,0x34,0x56,0x78};
+
+ /* matchIdx >= idxLowLimit is a (somewhat) unpredictable branch.
+ * However, the expression below compiles into a conditional move.
+ */
+ const BYTE* mvalAddr = ZSTD_selectAddr(matchIdx, idxLowLimit, matchAddress, dummy);
+ /* Note: this used to be written as: return test1 && test2;
+ * Unfortunately, once inlined, these tests become branches,
+ * in which case it becomes critical that they are executed in the right order (test1 then test2).
+ * So we have to write these tests in a specific manner to ensure their ordering.
+ */
+ if (MEM_read32(currentPtr) != MEM_read32(mvalAddr)) return 0;
+ /* force ordering of these tests, which matters once the function is inlined, as they become branches */
+ __asm__("");
+ return matchIdx >= idxLowLimit;
+}
+
+static int
+ZSTD_match4Found_branch(const BYTE* currentPtr, const BYTE* matchAddress, U32 matchIdx, U32 idxLowLimit)
+{
+ /* using a branch instead of a cmov,
+ * because it's faster in scenarios where matchIdx >= idxLowLimit is generally true,
+ * aka almost all candidates are within range */
+ U32 mval;
+ if (matchIdx >= idxLowLimit) {
+ mval = MEM_read32(matchAddress);
+ } else {
+ mval = MEM_read32(currentPtr) ^ 1; /* guaranteed to not match. */
+ }
+
+ return (MEM_read32(currentPtr) == mval);
+}
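/* Illustrative, stand-alone sketch (not from the patch): the cmov-vs-branch 4-byte
 * match check above, in isolation. select_addr() only mirrors the intent of
 * ZSTD_selectAddr(), which is defined elsewhere in zstd and may differ; k_dummy,
 * match4_cmov and match4_branch are illustrative names. */
#include <stdint.h>
#include <string.h>

static const uint8_t k_dummy[4] = {0x12, 0x34, 0x56, 0x78};

/* Branchless address selection: compilers typically lower the ternary to a
 * conditional move, so no hard-to-predict branch is taken on (idx >= lowLimit). */
static inline const uint8_t*
select_addr(uint32_t idx, uint32_t lowLimit, const uint8_t* valid, const uint8_t* fallback)
{
    return (idx >= lowLimit) ? valid : fallback;
}

static inline int match4_cmov(const uint8_t* cur, const uint8_t* cand,
                              uint32_t idx, uint32_t lowLimit)
{
    uint32_t a, b;
    memcpy(&a, cur, 4);
    memcpy(&b, select_addr(idx, lowLimit, cand, k_dummy), 4);
    /* Reject on data mismatch first, then validate the index, as in the patch. */
    return (a == b) && (idx >= lowLimit);
}

static inline int match4_branch(const uint8_t* cur, const uint8_t* cand,
                                uint32_t idx, uint32_t lowLimit)
{
    uint32_t a, b;
    memcpy(&a, cur, 4);
    /* Cheap when the branch predicts well, i.e. almost all candidates are in range. */
    if (idx >= lowLimit) {
        memcpy(&b, cand, 4);
    } else {
        memcpy(&b, cur, 4);
        b ^= 1; /* guaranteed mismatch */
    }
    return a == b;
}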
+
/*
* If you squint hard enough (and ignore repcodes), the search operation at any
@@ -89,17 +186,17 @@ void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
*
* This is also the work we do at the beginning to enter the loop initially.
*/
-FORCE_INLINE_TEMPLATE size_t
-ZSTD_compressBlock_fast_noDict_generic(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+FORCE_INLINE_TEMPLATE
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+size_t ZSTD_compressBlock_fast_noDict_generic(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize,
- U32 const mls, U32 const hasStep)
+ U32 const mls, int useCmov)
{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
U32* const hashTable = ms->hashTable;
U32 const hlog = cParams->hashLog;
- /* support stepSize of 0 */
- size_t const stepSize = hasStep ? (cParams->targetLength + !(cParams->targetLength) + 1) : 2;
+ size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1; /* min 2 */
const BYTE* const base = ms->window.base;
const BYTE* const istart = (const BYTE*)src;
const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
@@ -117,12 +214,11 @@ ZSTD_compressBlock_fast_noDict_generic(
U32 rep_offset1 = rep[0];
U32 rep_offset2 = rep[1];
- U32 offsetSaved = 0;
+ U32 offsetSaved1 = 0, offsetSaved2 = 0;
size_t hash0; /* hash for ip0 */
size_t hash1; /* hash for ip1 */
- U32 idx; /* match idx for ip0 */
- U32 mval; /* src value at match idx */
+ U32 matchIdx; /* match idx for ip0 */
U32 offcode;
const BYTE* match0;
@@ -135,14 +231,15 @@ ZSTD_compressBlock_fast_noDict_generic(
size_t step;
const BYTE* nextStep;
const size_t kStepIncr = (1 << (kSearchStrength - 1));
+ const ZSTD_match4Found matchFound = useCmov ? ZSTD_match4Found_cmov : ZSTD_match4Found_branch;
DEBUGLOG(5, "ZSTD_compressBlock_fast_generic");
ip0 += (ip0 == prefixStart);
{ U32 const curr = (U32)(ip0 - base);
U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog);
U32 const maxRep = curr - windowLow;
- if (rep_offset2 > maxRep) offsetSaved = rep_offset2, rep_offset2 = 0;
- if (rep_offset1 > maxRep) offsetSaved = rep_offset1, rep_offset1 = 0;
+ if (rep_offset2 > maxRep) offsetSaved2 = rep_offset2, rep_offset2 = 0;
+ if (rep_offset1 > maxRep) offsetSaved1 = rep_offset1, rep_offset1 = 0;
}
/* start each op */
@@ -163,7 +260,7 @@ _start: /* Requires: ip0 */
hash0 = ZSTD_hashPtr(ip0, hlog, mls);
hash1 = ZSTD_hashPtr(ip1, hlog, mls);
- idx = hashTable[hash0];
+ matchIdx = hashTable[hash0];
do {
/* load repcode match for ip[2]*/
@@ -180,26 +277,28 @@ _start: /* Requires: ip0 */
mLength = ip0[-1] == match0[-1];
ip0 -= mLength;
match0 -= mLength;
- offcode = STORE_REPCODE_1;
+ offcode = REPCODE1_TO_OFFBASE;
mLength += 4;
+
+ /* Write next hash table entry: it's already calculated.
+ * This write is known to be safe because ip1 is before the
+ * repcode (ip2). */
+ hashTable[hash1] = (U32)(ip1 - base);
+
goto _match;
}
- /* load match for ip[0] */
- if (idx >= prefixStartIndex) {
- mval = MEM_read32(base + idx);
- } else {
- mval = MEM_read32(ip0) ^ 1; /* guaranteed to not match. */
- }
+ if (matchFound(ip0, base + matchIdx, matchIdx, prefixStartIndex)) {
+ /* Write next hash table entry (it's already calculated).
+ * This write is known to be safe because the ip1 == ip0 + 1,
+ * so searching will resume after ip1 */
+ hashTable[hash1] = (U32)(ip1 - base);
- /* check match at ip[0] */
- if (MEM_read32(ip0) == mval) {
- /* found a match! */
goto _offset;
}
/* lookup ip[1] */
- idx = hashTable[hash1];
+ matchIdx = hashTable[hash1];
/* hash ip[2] */
hash0 = hash1;
@@ -214,21 +313,19 @@ _start: /* Requires: ip0 */
current0 = (U32)(ip0 - base);
hashTable[hash0] = current0;
- /* load match for ip[0] */
- if (idx >= prefixStartIndex) {
- mval = MEM_read32(base + idx);
- } else {
- mval = MEM_read32(ip0) ^ 1; /* guaranteed to not match. */
- }
-
- /* check match at ip[0] */
- if (MEM_read32(ip0) == mval) {
- /* found a match! */
+ if (matchFound(ip0, base + matchIdx, matchIdx, prefixStartIndex)) {
+ /* Write next hash table entry, since it's already calculated */
+ if (step <= 4) {
+ /* Avoid writing an index if it's >= position where search will resume.
+ * The minimum possible match has length 4, so search can resume at ip0 + 4.
+ */
+ hashTable[hash1] = (U32)(ip1 - base);
+ }
goto _offset;
}
/* lookup ip[1] */
- idx = hashTable[hash1];
+ matchIdx = hashTable[hash1];
/* hash ip[2] */
hash0 = hash1;
@@ -250,13 +347,28 @@ _start: /* Requires: ip0 */
} while (ip3 < ilimit);
_cleanup:
- /* Note that there are probably still a couple positions we could search.
+ /* Note that there are probably still a couple positions one could search.
* However, it seems to be a meaningful performance hit to try to search
* them. So let's not. */
+ /* When the repcodes are outside of the prefix, we set them to zero before the loop.
+ * When the offsets are still zero, we need to restore them after the block to have a correct
+ * repcode history. If only one offset was invalid, it is easy. The tricky case is when both
+ * offsets were invalid. We need to figure out which offset to refill with.
+ * - If both offsets are zero, they are in the same order.
+ * - If both offsets are non-zero, we won't restore the offsets from `offsetSaved[12]`.
+ * - If only one is zero, we need to decide which offset to restore.
+ *   - If rep_offset1 is non-zero, then rep_offset2 must be offsetSaved1.
+ *   - It is impossible for rep_offset2 to be non-zero.
+ *
+ * So if rep_offset1 started invalid (offsetSaved1 != 0) and became valid (rep_offset1 != 0), then
+ * set rep[0] = rep_offset1 and rep[1] = offsetSaved1.
+ */
+ offsetSaved2 = ((offsetSaved1 != 0) && (rep_offset1 != 0)) ? offsetSaved1 : offsetSaved2;
+
/* save reps for next block */
- rep[0] = rep_offset1 ? rep_offset1 : offsetSaved;
- rep[1] = rep_offset2 ? rep_offset2 : offsetSaved;
+ rep[0] = rep_offset1 ? rep_offset1 : offsetSaved1;
+ rep[1] = rep_offset2 ? rep_offset2 : offsetSaved2;
/* Return the last literals size */
return (size_t)(iend - anchor);
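/* Minimal stand-alone restatement (not from the patch) of the offset-restore rule
 * above; the helper and its parameter names are illustrative, the patch performs
 * the same steps inline. */
#include <stdint.h>

static void save_reps(uint32_t rep[2],
                      uint32_t rep1, uint32_t rep2,     /* live offsets after the block */
                      uint32_t saved1, uint32_t saved2) /* offsets zeroed before the loop */
{
    /* If rep1 started out-of-range (saved1 != 0) and later became valid (rep1 != 0),
     * the original rep1 slides down into the rep2 slot. */
    if ((saved1 != 0) && (rep1 != 0))
        saved2 = saved1;

    rep[0] = rep1 ? rep1 : saved1;
    rep[1] = rep2 ? rep2 : saved2;
}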
@@ -264,10 +376,10 @@ _cleanup:
_offset: /* Requires: ip0, idx */
/* Compute the offset code. */
- match0 = base + idx;
+ match0 = base + matchIdx;
rep_offset2 = rep_offset1;
rep_offset1 = (U32)(ip0-match0);
- offcode = STORE_OFFSET(rep_offset1);
+ offcode = OFFSET_TO_OFFBASE(rep_offset1);
mLength = 4;
/* Count the backwards match length. */
@@ -287,11 +399,6 @@ _match: /* Requires: ip0, match0, offcode */
ip0 += mLength;
anchor = ip0;
- /* write next hash table entry */
- if (ip1 < ip0) {
- hashTable[hash1] = (U32)(ip1 - base);
- }
-
/* Fill table and check for immediate repcode. */
if (ip0 <= ilimit) {
/* Fill Table */
@@ -306,7 +413,7 @@ _match: /* Requires: ip0, match0, offcode */
{ U32 const tmpOff = rep_offset2; rep_offset2 = rep_offset1; rep_offset1 = tmpOff; } /* swap rep_offset2 <=> rep_offset1 */
hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
ip0 += rLength;
- ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, STORE_REPCODE_1, rLength);
+ ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, REPCODE1_TO_OFFBASE, rLength);
anchor = ip0;
continue; /* faster when present (confirmed on gcc-8) ... (?) */
} } }
@@ -314,12 +421,12 @@ _match: /* Requires: ip0, match0, offcode */
goto _start;
}
-#define ZSTD_GEN_FAST_FN(dictMode, mls, step) \
- static size_t ZSTD_compressBlock_fast_##dictMode##_##mls##_##step( \
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], \
+#define ZSTD_GEN_FAST_FN(dictMode, mml, cmov) \
+ static size_t ZSTD_compressBlock_fast_##dictMode##_##mml##_##cmov( \
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], \
void const* src, size_t srcSize) \
{ \
- return ZSTD_compressBlock_fast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mls, step); \
+ return ZSTD_compressBlock_fast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mml, cmov); \
}
ZSTD_GEN_FAST_FN(noDict, 4, 1)
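/* For readability: per the macro above, ZSTD_GEN_FAST_FN(noDict, 4, 1) generates the
 * following specialization (shown expanded here purely as an illustration): */
static size_t ZSTD_compressBlock_fast_noDict_4_1(
        ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_fast_noDict_generic(ms, seqStore, rep, src, srcSize,
                                                  4 /* mml */, 1 /* useCmov */);
}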
@@ -333,13 +440,15 @@ ZSTD_GEN_FAST_FN(noDict, 6, 0)
ZSTD_GEN_FAST_FN(noDict, 7, 0)
size_t ZSTD_compressBlock_fast(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
- U32 const mls = ms->cParams.minMatch;
+ U32 const mml = ms->cParams.minMatch;
+ /* use cmov when "candidate in range" branch is likely unpredictable */
+ int const useCmov = ms->cParams.windowLog < 19;
assert(ms->dictMatchState == NULL);
- if (ms->cParams.targetLength > 1) {
- switch(mls)
+ if (useCmov) {
+ switch(mml)
{
default: /* includes case 3 */
case 4 :
@@ -352,7 +461,8 @@ size_t ZSTD_compressBlock_fast(
return ZSTD_compressBlock_fast_noDict_7_1(ms, seqStore, rep, src, srcSize);
}
} else {
- switch(mls)
+ /* use a branch instead */
+ switch(mml)
{
default: /* includes case 3 */
case 4 :
@@ -364,13 +474,13 @@ size_t ZSTD_compressBlock_fast(
case 7 :
return ZSTD_compressBlock_fast_noDict_7_0(ms, seqStore, rep, src, srcSize);
}
-
}
}
FORCE_INLINE_TEMPLATE
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_compressBlock_fast_dictMatchState_generic(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize, U32 const mls, U32 const hasStep)
{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
@@ -380,16 +490,16 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
const BYTE* const base = ms->window.base;
const BYTE* const istart = (const BYTE*)src;
- const BYTE* ip = istart;
+ const BYTE* ip0 = istart;
+ const BYTE* ip1 = ip0 + stepSize; /* we assert below that stepSize >= 1 */
const BYTE* anchor = istart;
const U32 prefixStartIndex = ms->window.dictLimit;
const BYTE* const prefixStart = base + prefixStartIndex;
const BYTE* const iend = istart + srcSize;
const BYTE* const ilimit = iend - HASH_READ_SIZE;
U32 offset_1=rep[0], offset_2=rep[1];
- U32 offsetSaved = 0;
- const ZSTD_matchState_t* const dms = ms->dictMatchState;
+ const ZSTD_MatchState_t* const dms = ms->dictMatchState;
const ZSTD_compressionParameters* const dictCParams = &dms->cParams ;
const U32* const dictHashTable = dms->hashTable;
const U32 dictStartIndex = dms->window.dictLimit;
@@ -397,13 +507,13 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
const BYTE* const dictStart = dictBase + dictStartIndex;
const BYTE* const dictEnd = dms->window.nextSrc;
const U32 dictIndexDelta = prefixStartIndex - (U32)(dictEnd - dictBase);
- const U32 dictAndPrefixLength = (U32)(ip - prefixStart + dictEnd - dictStart);
- const U32 dictHLog = dictCParams->hashLog;
+ const U32 dictAndPrefixLength = (U32)(istart - prefixStart + dictEnd - dictStart);
+ const U32 dictHBits = dictCParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS;
/* if a dictionary is still attached, it necessarily means that
* it is within window size. So we just check it. */
const U32 maxDistance = 1U << cParams->windowLog;
- const U32 endIndex = (U32)((size_t)(ip - base) + srcSize);
+ const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
assert(endIndex - prefixStartIndex <= maxDistance);
(void)maxDistance; (void)endIndex; /* these variables are not used when assert() is disabled */
@@ -413,106 +523,154 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
* when translating a dict index into a local index */
assert(prefixStartIndex >= (U32)(dictEnd - dictBase));
+ if (ms->prefetchCDictTables) {
+ size_t const hashTableBytes = (((size_t)1) << dictCParams->hashLog) * sizeof(U32);
+ PREFETCH_AREA(dictHashTable, hashTableBytes);
+ }
+
/* init */
DEBUGLOG(5, "ZSTD_compressBlock_fast_dictMatchState_generic");
- ip += (dictAndPrefixLength == 0);
+ ip0 += (dictAndPrefixLength == 0);
/* dictMatchState repCode checks don't currently handle repCode == 0
* disabling. */
assert(offset_1 <= dictAndPrefixLength);
assert(offset_2 <= dictAndPrefixLength);
- /* Main Search Loop */
- while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
+ /* Outer search loop */
+ assert(stepSize >= 1);
+ while (ip1 <= ilimit) { /* repcode check at (ip0 + 1) is safe because ip0 < ip1 */
size_t mLength;
- size_t const h = ZSTD_hashPtr(ip, hlog, mls);
- U32 const curr = (U32)(ip-base);
- U32 const matchIndex = hashTable[h];
- const BYTE* match = base + matchIndex;
- const U32 repIndex = curr + 1 - offset_1;
- const BYTE* repMatch = (repIndex < prefixStartIndex) ?
- dictBase + (repIndex - dictIndexDelta) :
- base + repIndex;
- hashTable[h] = curr; /* update hash table */
-
- if ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */
- && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
- const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
- mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
- ip++;
- ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, mLength);
- } else if ( (matchIndex <= prefixStartIndex) ) {
- size_t const dictHash = ZSTD_hashPtr(ip, dictHLog, mls);
- U32 const dictMatchIndex = dictHashTable[dictHash];
- const BYTE* dictMatch = dictBase + dictMatchIndex;
- if (dictMatchIndex <= dictStartIndex ||
- MEM_read32(dictMatch) != MEM_read32(ip)) {
- assert(stepSize >= 1);
- ip += ((ip-anchor) >> kSearchStrength) + stepSize;
- continue;
- } else {
- /* found a dict match */
- U32 const offset = (U32)(curr-dictMatchIndex-dictIndexDelta);
- mLength = ZSTD_count_2segments(ip+4, dictMatch+4, iend, dictEnd, prefixStart) + 4;
- while (((ip>anchor) & (dictMatch>dictStart))
- && (ip[-1] == dictMatch[-1])) {
- ip--; dictMatch--; mLength++;
+ size_t hash0 = ZSTD_hashPtr(ip0, hlog, mls);
+
+ size_t const dictHashAndTag0 = ZSTD_hashPtr(ip0, dictHBits, mls);
+ U32 dictMatchIndexAndTag = dictHashTable[dictHashAndTag0 >> ZSTD_SHORT_CACHE_TAG_BITS];
+ int dictTagsMatch = ZSTD_comparePackedTags(dictMatchIndexAndTag, dictHashAndTag0);
+
+ U32 matchIndex = hashTable[hash0];
+ U32 curr = (U32)(ip0 - base);
+ size_t step = stepSize;
+ const size_t kStepIncr = 1 << kSearchStrength;
+ const BYTE* nextStep = ip0 + kStepIncr;
+
+ /* Inner search loop */
+ while (1) {
+ const BYTE* match = base + matchIndex;
+ const U32 repIndex = curr + 1 - offset_1;
+ const BYTE* repMatch = (repIndex < prefixStartIndex) ?
+ dictBase + (repIndex - dictIndexDelta) :
+ base + repIndex;
+ const size_t hash1 = ZSTD_hashPtr(ip1, hlog, mls);
+ size_t const dictHashAndTag1 = ZSTD_hashPtr(ip1, dictHBits, mls);
+ hashTable[hash0] = curr; /* update hash table */
+
+ if ((ZSTD_index_overlap_check(prefixStartIndex, repIndex))
+ && (MEM_read32(repMatch) == MEM_read32(ip0 + 1))) {
+ const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
+ mLength = ZSTD_count_2segments(ip0 + 1 + 4, repMatch + 4, iend, repMatchEnd, prefixStart) + 4;
+ ip0++;
+ ZSTD_storeSeq(seqStore, (size_t) (ip0 - anchor), anchor, iend, REPCODE1_TO_OFFBASE, mLength);
+ break;
+ }
+
+ if (dictTagsMatch) {
+ /* Found a possible dict match */
+ const U32 dictMatchIndex = dictMatchIndexAndTag >> ZSTD_SHORT_CACHE_TAG_BITS;
+ const BYTE* dictMatch = dictBase + dictMatchIndex;
+ if (dictMatchIndex > dictStartIndex &&
+ MEM_read32(dictMatch) == MEM_read32(ip0)) {
+ /* To replicate extDict parse behavior, we only use dict matches when the normal matchIndex is invalid */
+ if (matchIndex <= prefixStartIndex) {
+ U32 const offset = (U32) (curr - dictMatchIndex - dictIndexDelta);
+ mLength = ZSTD_count_2segments(ip0 + 4, dictMatch + 4, iend, dictEnd, prefixStart) + 4;
+ while (((ip0 > anchor) & (dictMatch > dictStart))
+ && (ip0[-1] == dictMatch[-1])) {
+ ip0--;
+ dictMatch--;
+ mLength++;
+ } /* catch up */
+ offset_2 = offset_1;
+ offset_1 = offset;
+ ZSTD_storeSeq(seqStore, (size_t) (ip0 - anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength);
+ break;
+ }
+ }
+ }
+
+ if (ZSTD_match4Found_cmov(ip0, match, matchIndex, prefixStartIndex)) {
+ /* found a regular match of size >= 4 */
+ U32 const offset = (U32) (ip0 - match);
+ mLength = ZSTD_count(ip0 + 4, match + 4, iend) + 4;
+ while (((ip0 > anchor) & (match > prefixStart))
+ && (ip0[-1] == match[-1])) {
+ ip0--;
+ match--;
+ mLength++;
} /* catch up */
offset_2 = offset_1;
offset_1 = offset;
- ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);
+ ZSTD_storeSeq(seqStore, (size_t) (ip0 - anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength);
+ break;
}
- } else if (MEM_read32(match) != MEM_read32(ip)) {
- /* it's not a match, and we're not going to check the dictionary */
- assert(stepSize >= 1);
- ip += ((ip-anchor) >> kSearchStrength) + stepSize;
- continue;
- } else {
- /* found a regular match */
- U32 const offset = (U32)(ip-match);
- mLength = ZSTD_count(ip+4, match+4, iend) + 4;
- while (((ip>anchor) & (match>prefixStart))
- && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
- offset_2 = offset_1;
- offset_1 = offset;
- ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);
- }
+
+ /* Prepare for next iteration */
+ dictMatchIndexAndTag = dictHashTable[dictHashAndTag1 >> ZSTD_SHORT_CACHE_TAG_BITS];
+ dictTagsMatch = ZSTD_comparePackedTags(dictMatchIndexAndTag, dictHashAndTag1);
+ matchIndex = hashTable[hash1];
+
+ if (ip1 >= nextStep) {
+ step++;
+ nextStep += kStepIncr;
+ }
+ ip0 = ip1;
+ ip1 = ip1 + step;
+ if (ip1 > ilimit) goto _cleanup;
+
+ curr = (U32)(ip0 - base);
+ hash0 = hash1;
+ } /* end inner search loop */
/* match found */
- ip += mLength;
- anchor = ip;
+ assert(mLength);
+ ip0 += mLength;
+ anchor = ip0;
- if (ip <= ilimit) {
+ if (ip0 <= ilimit) {
/* Fill Table */
assert(base+curr+2 > istart); /* check base overflow */
hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2; /* here because curr+2 could be > iend-8 */
- hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
+ hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);
/* check immediate repcode */
- while (ip <= ilimit) {
- U32 const current2 = (U32)(ip-base);
+ while (ip0 <= ilimit) {
+ U32 const current2 = (U32)(ip0-base);
U32 const repIndex2 = current2 - offset_2;
const BYTE* repMatch2 = repIndex2 < prefixStartIndex ?
dictBase - dictIndexDelta + repIndex2 :
base + repIndex2;
- if ( ((U32)((prefixStartIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
- && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
+ if ( (ZSTD_index_overlap_check(prefixStartIndex, repIndex2))
+ && (MEM_read32(repMatch2) == MEM_read32(ip0))) {
const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
- size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
+ size_t const repLength2 = ZSTD_count_2segments(ip0+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
- ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, repLength2);
- hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
- ip += repLength2;
- anchor = ip;
+ ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, repLength2);
+ hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = current2;
+ ip0 += repLength2;
+ anchor = ip0;
continue;
}
break;
}
}
+
+ /* Prepare for next iteration */
+ assert(ip0 == anchor);
+ ip1 = ip0 + stepSize;
}
+_cleanup:
/* save reps for next block */
- rep[0] = offset_1 ? offset_1 : offsetSaved;
- rep[1] = offset_2 ? offset_2 : offsetSaved;
+ rep[0] = offset_1;
+ rep[1] = offset_2;
/* Return the last literals size */
return (size_t)(iend - anchor);
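/* Illustrative sketch (not from the patch): what the ZSTD_index_overlap_check() calls
 * above are understood to express. The old inline form, still visible in this diff,
 * was ((U32)((prefixStartIndex-1) - repIndex) >= 3), an intentional unsigned underflow
 * that rejects repIndex values in the 3 positions just below prefixStartIndex (where a
 * repcode match would straddle the dictionary/prefix boundary) while accepting
 * everything else, including repIndex >= prefixStartIndex. Helper name is illustrative. */
#include <stdint.h>

static inline int index_overlap_check(uint32_t prefixStartIndex, uint32_t repIndex)
{
    return (uint32_t)((prefixStartIndex - 1) - repIndex) >= 3;
}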
@@ -525,7 +683,7 @@ ZSTD_GEN_FAST_FN(dictMatchState, 6, 0)
ZSTD_GEN_FAST_FN(dictMatchState, 7, 0)
size_t ZSTD_compressBlock_fast_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
U32 const mls = ms->cParams.minMatch;
@@ -545,19 +703,20 @@ size_t ZSTD_compressBlock_fast_dictMatchState(
}
-static size_t ZSTD_compressBlock_fast_extDict_generic(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+static
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+size_t ZSTD_compressBlock_fast_extDict_generic(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize, U32 const mls, U32 const hasStep)
{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
U32* const hashTable = ms->hashTable;
U32 const hlog = cParams->hashLog;
/* support stepSize of 0 */
- U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
+ size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1;
const BYTE* const base = ms->window.base;
const BYTE* const dictBase = ms->window.dictBase;
const BYTE* const istart = (const BYTE*)src;
- const BYTE* ip = istart;
const BYTE* anchor = istart;
const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
const U32 lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
@@ -570,6 +729,28 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
const BYTE* const iend = istart + srcSize;
const BYTE* const ilimit = iend - 8;
U32 offset_1=rep[0], offset_2=rep[1];
+ U32 offsetSaved1 = 0, offsetSaved2 = 0;
+
+ const BYTE* ip0 = istart;
+ const BYTE* ip1;
+ const BYTE* ip2;
+ const BYTE* ip3;
+ U32 current0;
+
+
+ size_t hash0; /* hash for ip0 */
+ size_t hash1; /* hash for ip1 */
+ U32 idx; /* match idx for ip0 */
+ const BYTE* idxBase; /* base pointer for idx */
+
+ U32 offcode;
+ const BYTE* match0;
+ size_t mLength;
+ const BYTE* matchEnd = 0; /* initialize to avoid warning, assert != 0 later */
+
+ size_t step;
+ const BYTE* nextStep;
+ const size_t kStepIncr = (1 << (kSearchStrength - 1));
(void)hasStep; /* not currently specialized on whether it's accelerated */
@@ -579,75 +760,202 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
if (prefixStartIndex == dictStartIndex)
return ZSTD_compressBlock_fast(ms, seqStore, rep, src, srcSize);
- /* Search Loop */
- while (ip < ilimit) { /* < instead of <=, because (ip+1) */
- const size_t h = ZSTD_hashPtr(ip, hlog, mls);
- const U32 matchIndex = hashTable[h];
- const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base;
- const BYTE* match = matchBase + matchIndex;
- const U32 curr = (U32)(ip-base);
- const U32 repIndex = curr + 1 - offset_1;
- const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
- const BYTE* const repMatch = repBase + repIndex;
- hashTable[h] = curr; /* update hash table */
- DEBUGLOG(7, "offset_1 = %u , curr = %u", offset_1, curr);
-
- if ( ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow */
- & (offset_1 <= curr+1 - dictStartIndex) ) /* note: we are searching at curr+1 */
- && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
- const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
- size_t const rLength = ZSTD_count_2segments(ip+1 +4, repMatch +4, iend, repMatchEnd, prefixStart) + 4;
- ip++;
- ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, rLength);
- ip += rLength;
- anchor = ip;
- } else {
- if ( (matchIndex < dictStartIndex) ||
- (MEM_read32(match) != MEM_read32(ip)) ) {
- assert(stepSize >= 1);
- ip += ((ip-anchor) >> kSearchStrength) + stepSize;
- continue;
+ { U32 const curr = (U32)(ip0 - base);
+ U32 const maxRep = curr - dictStartIndex;
+ if (offset_2 >= maxRep) offsetSaved2 = offset_2, offset_2 = 0;
+ if (offset_1 >= maxRep) offsetSaved1 = offset_1, offset_1 = 0;
+ }
+
+ /* start each op */
+_start: /* Requires: ip0 */
+
+ step = stepSize;
+ nextStep = ip0 + kStepIncr;
+
+ /* calculate positions, ip0 - anchor == 0, so we skip step calc */
+ ip1 = ip0 + 1;
+ ip2 = ip0 + step;
+ ip3 = ip2 + 1;
+
+ if (ip3 >= ilimit) {
+ goto _cleanup;
+ }
+
+ hash0 = ZSTD_hashPtr(ip0, hlog, mls);
+ hash1 = ZSTD_hashPtr(ip1, hlog, mls);
+
+ idx = hashTable[hash0];
+ idxBase = idx < prefixStartIndex ? dictBase : base;
+
+ do {
+ { /* load repcode match for ip[2] */
+ U32 const current2 = (U32)(ip2 - base);
+ U32 const repIndex = current2 - offset_1;
+ const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
+ U32 rval;
+ if ( ((U32)(prefixStartIndex - repIndex) >= 4) /* intentional underflow */
+ & (offset_1 > 0) ) {
+ rval = MEM_read32(repBase + repIndex);
+ } else {
+ rval = MEM_read32(ip2) ^ 1; /* guaranteed to not match. */
}
- { const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
- const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
- U32 const offset = curr - matchIndex;
- size_t mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
- while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
- offset_2 = offset_1; offset_1 = offset; /* update offset history */
- ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);
- ip += mLength;
- anchor = ip;
+
+ /* write back hash table entry */
+ current0 = (U32)(ip0 - base);
+ hashTable[hash0] = current0;
+
+ /* check repcode at ip[2] */
+ if (MEM_read32(ip2) == rval) {
+ ip0 = ip2;
+ match0 = repBase + repIndex;
+ matchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
+ assert((match0 != prefixStart) & (match0 != dictStart));
+ mLength = ip0[-1] == match0[-1];
+ ip0 -= mLength;
+ match0 -= mLength;
+ offcode = REPCODE1_TO_OFFBASE;
+ mLength += 4;
+ goto _match;
} }
- if (ip <= ilimit) {
- /* Fill Table */
- hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2;
- hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
- /* check immediate repcode */
- while (ip <= ilimit) {
- U32 const current2 = (U32)(ip-base);
- U32 const repIndex2 = current2 - offset_2;
- const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
- if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (offset_2 <= curr - dictStartIndex)) /* intentional overflow */
- && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
- const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
- size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
- { U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; } /* swap offset_2 <=> offset_1 */
- ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, STORE_REPCODE_1, repLength2);
- hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
- ip += repLength2;
- anchor = ip;
- continue;
- }
- break;
- } } }
+ { /* load match for ip[0] */
+ U32 const mval = idx >= dictStartIndex ?
+ MEM_read32(idxBase + idx) :
+ MEM_read32(ip0) ^ 1; /* guaranteed not to match */
+
+ /* check match at ip[0] */
+ if (MEM_read32(ip0) == mval) {
+ /* found a match! */
+ goto _offset;
+ } }
+
+ /* lookup ip[1] */
+ idx = hashTable[hash1];
+ idxBase = idx < prefixStartIndex ? dictBase : base;
+
+ /* hash ip[2] */
+ hash0 = hash1;
+ hash1 = ZSTD_hashPtr(ip2, hlog, mls);
+
+ /* advance to next positions */
+ ip0 = ip1;
+ ip1 = ip2;
+ ip2 = ip3;
+
+ /* write back hash table entry */
+ current0 = (U32)(ip0 - base);
+ hashTable[hash0] = current0;
+
+ { /* load match for ip[0] */
+ U32 const mval = idx >= dictStartIndex ?
+ MEM_read32(idxBase + idx) :
+ MEM_read32(ip0) ^ 1; /* guaranteed not to match */
+
+ /* check match at ip[0] */
+ if (MEM_read32(ip0) == mval) {
+ /* found a match! */
+ goto _offset;
+ } }
+
+ /* lookup ip[1] */
+ idx = hashTable[hash1];
+ idxBase = idx < prefixStartIndex ? dictBase : base;
+
+ /* hash ip[2] */
+ hash0 = hash1;
+ hash1 = ZSTD_hashPtr(ip2, hlog, mls);
+
+ /* advance to next positions */
+ ip0 = ip1;
+ ip1 = ip2;
+ ip2 = ip0 + step;
+ ip3 = ip1 + step;
+
+ /* calculate step */
+ if (ip2 >= nextStep) {
+ step++;
+ PREFETCH_L1(ip1 + 64);
+ PREFETCH_L1(ip1 + 128);
+ nextStep += kStepIncr;
+ }
+ } while (ip3 < ilimit);
+
+_cleanup:
+ /* Note that there are probably still a couple positions we could search.
+ * However, it seems to be a meaningful performance hit to try to search
+ * them. So let's not. */
+
+ /* If offset_1 started invalid (offsetSaved1 != 0) and became valid (offset_1 != 0),
+ * rotate saved offsets. See comment in ZSTD_compressBlock_fast_noDict for more context. */
+ offsetSaved2 = ((offsetSaved1 != 0) && (offset_1 != 0)) ? offsetSaved1 : offsetSaved2;
/* save reps for next block */
- rep[0] = offset_1;
- rep[1] = offset_2;
+ rep[0] = offset_1 ? offset_1 : offsetSaved1;
+ rep[1] = offset_2 ? offset_2 : offsetSaved2;
/* Return the last literals size */
return (size_t)(iend - anchor);
+
+_offset: /* Requires: ip0, idx, idxBase */
+
+ /* Compute the offset code. */
+ { U32 const offset = current0 - idx;
+ const BYTE* const lowMatchPtr = idx < prefixStartIndex ? dictStart : prefixStart;
+ matchEnd = idx < prefixStartIndex ? dictEnd : iend;
+ match0 = idxBase + idx;
+ offset_2 = offset_1;
+ offset_1 = offset;
+ offcode = OFFSET_TO_OFFBASE(offset);
+ mLength = 4;
+
+ /* Count the backwards match length. */
+ while (((ip0>anchor) & (match0>lowMatchPtr)) && (ip0[-1] == match0[-1])) {
+ ip0--;
+ match0--;
+ mLength++;
+ } }
+
+_match: /* Requires: ip0, match0, offcode, matchEnd */
+
+ /* Count the forward length. */
+ assert(matchEnd != 0);
+ mLength += ZSTD_count_2segments(ip0 + mLength, match0 + mLength, iend, matchEnd, prefixStart);
+
+ ZSTD_storeSeq(seqStore, (size_t)(ip0 - anchor), anchor, iend, offcode, mLength);
+
+ ip0 += mLength;
+ anchor = ip0;
+
+ /* write next hash table entry */
+ if (ip1 < ip0) {
+ hashTable[hash1] = (U32)(ip1 - base);
+ }
+
+ /* Fill table and check for immediate repcode. */
+ if (ip0 <= ilimit) {
+ /* Fill Table */
+ assert(base+current0+2 > istart); /* check base overflow */
+ hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2; /* here because current+2 could be > iend-8 */
+ hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);
+
+ while (ip0 <= ilimit) {
+ U32 const repIndex2 = (U32)(ip0-base) - offset_2;
+ const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
+ if ( ((ZSTD_index_overlap_check(prefixStartIndex, repIndex2)) & (offset_2 > 0))
+ && (MEM_read32(repMatch2) == MEM_read32(ip0)) ) {
+ const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
+ size_t const repLength2 = ZSTD_count_2segments(ip0+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
+ { U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; } /* swap offset_2 <=> offset_1 */
+ ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, REPCODE1_TO_OFFBASE, repLength2);
+ hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
+ ip0 += repLength2;
+ anchor = ip0;
+ continue;
+ }
+ break;
+ } }
+
+ goto _start;
}
ZSTD_GEN_FAST_FN(extDict, 4, 0)
@@ -656,10 +964,11 @@ ZSTD_GEN_FAST_FN(extDict, 6, 0)
ZSTD_GEN_FAST_FN(extDict, 7, 0)
size_t ZSTD_compressBlock_fast_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
U32 const mls = ms->cParams.minMatch;
+ assert(ms->dictMatchState == NULL);
switch(mls)
{
default: /* includes case 3 */
diff --git a/lib/zstd/compress/zstd_fast.h b/lib/zstd/compress/zstd_fast.h
index fddc2f532d21..04fde0a72a4e 100644
--- a/lib/zstd/compress/zstd_fast.h
+++ b/lib/zstd/compress/zstd_fast.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -11,21 +12,20 @@
#ifndef ZSTD_FAST_H
#define ZSTD_FAST_H
-
#include "../common/mem.h" /* U32 */
#include "zstd_compress_internal.h"
-void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
- void const* end, ZSTD_dictTableLoadMethod_e dtlm);
+void ZSTD_fillHashTable(ZSTD_MatchState_t* ms,
+ void const* end, ZSTD_dictTableLoadMethod_e dtlm,
+ ZSTD_tableFillPurpose_e tfp);
size_t ZSTD_compressBlock_fast(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_fast_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_fast_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-
#endif /* ZSTD_FAST_H */
diff --git a/lib/zstd/compress/zstd_lazy.c b/lib/zstd/compress/zstd_lazy.c
index 0298a01a7504..88e2501fe3ef 100644
--- a/lib/zstd/compress/zstd_lazy.c
+++ b/lib/zstd/compress/zstd_lazy.c
@@ -1,5 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -10,14 +11,23 @@
#include "zstd_compress_internal.h"
#include "zstd_lazy.h"
+#include "../common/bits.h" /* ZSTD_countTrailingZeros64 */
+
+#if !defined(ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR) \
+ || !defined(ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR) \
+ || !defined(ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR) \
+ || !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR)
+
+#define kLazySkippingStep 8
/*-*************************************
* Binary Tree search
***************************************/
-static void
-ZSTD_updateDUBT(ZSTD_matchState_t* ms,
+static
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+void ZSTD_updateDUBT(ZSTD_MatchState_t* ms,
const BYTE* ip, const BYTE* iend,
U32 mls)
{
@@ -60,8 +70,9 @@ ZSTD_updateDUBT(ZSTD_matchState_t* ms,
* sort one already inserted but unsorted position
* assumption : curr >= btlow == (curr - btmask)
* doesn't fail */
-static void
-ZSTD_insertDUBT1(const ZSTD_matchState_t* ms,
+static
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+void ZSTD_insertDUBT1(const ZSTD_MatchState_t* ms,
U32 curr, const BYTE* inputEnd,
U32 nbCompares, U32 btLow,
const ZSTD_dictMode_e dictMode)
@@ -149,9 +160,10 @@ ZSTD_insertDUBT1(const ZSTD_matchState_t* ms,
}
-static size_t
-ZSTD_DUBT_findBetterDictMatch (
- const ZSTD_matchState_t* ms,
+static
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+size_t ZSTD_DUBT_findBetterDictMatch (
+ const ZSTD_MatchState_t* ms,
const BYTE* const ip, const BYTE* const iend,
size_t* offsetPtr,
size_t bestLength,
@@ -159,7 +171,7 @@ ZSTD_DUBT_findBetterDictMatch (
U32 const mls,
const ZSTD_dictMode_e dictMode)
{
- const ZSTD_matchState_t * const dms = ms->dictMatchState;
+ const ZSTD_MatchState_t * const dms = ms->dictMatchState;
const ZSTD_compressionParameters* const dmsCParams = &dms->cParams;
const U32 * const dictHashTable = dms->hashTable;
U32 const hashLog = dmsCParams->hashLog;
@@ -197,8 +209,8 @@ ZSTD_DUBT_findBetterDictMatch (
U32 matchIndex = dictMatchIndex + dictIndexDelta;
if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) {
DEBUGLOG(9, "ZSTD_DUBT_findBetterDictMatch(%u) : found better match length %u -> %u and offsetCode %u -> %u (dictMatchIndex %u, matchIndex %u)",
- curr, (U32)bestLength, (U32)matchLength, (U32)*offsetPtr, STORE_OFFSET(curr - matchIndex), dictMatchIndex, matchIndex);
- bestLength = matchLength, *offsetPtr = STORE_OFFSET(curr - matchIndex);
+ curr, (U32)bestLength, (U32)matchLength, (U32)*offsetPtr, OFFSET_TO_OFFBASE(curr - matchIndex), dictMatchIndex, matchIndex);
+ bestLength = matchLength, *offsetPtr = OFFSET_TO_OFFBASE(curr - matchIndex);
}
if (ip+matchLength == iend) { /* reached end of input : ip[matchLength] is not valid, no way to know if it's larger or smaller than match */
break; /* drop, to guarantee consistency (miss a little bit of compression) */
@@ -218,7 +230,7 @@ ZSTD_DUBT_findBetterDictMatch (
}
if (bestLength >= MINMATCH) {
- U32 const mIndex = curr - (U32)STORED_OFFSET(*offsetPtr); (void)mIndex;
+ U32 const mIndex = curr - (U32)OFFBASE_TO_OFFSET(*offsetPtr); (void)mIndex;
DEBUGLOG(8, "ZSTD_DUBT_findBetterDictMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
curr, (U32)bestLength, (U32)*offsetPtr, mIndex);
}
@@ -227,10 +239,11 @@ ZSTD_DUBT_findBetterDictMatch (
}
-static size_t
-ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
+static
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+size_t ZSTD_DUBT_findBestMatch(ZSTD_MatchState_t* ms,
const BYTE* const ip, const BYTE* const iend,
- size_t* offsetPtr,
+ size_t* offBasePtr,
U32 const mls,
const ZSTD_dictMode_e dictMode)
{
@@ -327,8 +340,8 @@ ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
if (matchLength > bestLength) {
if (matchLength > matchEndIdx - matchIndex)
matchEndIdx = matchIndex + (U32)matchLength;
- if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) )
- bestLength = matchLength, *offsetPtr = STORE_OFFSET(curr - matchIndex);
+ if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr - matchIndex + 1) - ZSTD_highbit32((U32)*offBasePtr)) )
+ bestLength = matchLength, *offBasePtr = OFFSET_TO_OFFBASE(curr - matchIndex);
if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */
if (dictMode == ZSTD_dictMatchState) {
nbCompares = 0; /* in addition to avoiding checking any
@@ -361,16 +374,16 @@ ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
if (dictMode == ZSTD_dictMatchState && nbCompares) {
bestLength = ZSTD_DUBT_findBetterDictMatch(
ms, ip, iend,
- offsetPtr, bestLength, nbCompares,
+ offBasePtr, bestLength, nbCompares,
mls, dictMode);
}
assert(matchEndIdx > curr+8); /* ensure nextToUpdate is increased */
ms->nextToUpdate = matchEndIdx - 8; /* skip repetitive patterns */
if (bestLength >= MINMATCH) {
- U32 const mIndex = curr - (U32)STORED_OFFSET(*offsetPtr); (void)mIndex;
+ U32 const mIndex = curr - (U32)OFFBASE_TO_OFFSET(*offBasePtr); (void)mIndex;
DEBUGLOG(8, "ZSTD_DUBT_findBestMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
- curr, (U32)bestLength, (U32)*offsetPtr, mIndex);
+ curr, (U32)bestLength, (U32)*offBasePtr, mIndex);
}
return bestLength;
}
@@ -378,24 +391,25 @@ ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
/* ZSTD_BtFindBestMatch() : Tree updater, providing best match */
-FORCE_INLINE_TEMPLATE size_t
-ZSTD_BtFindBestMatch( ZSTD_matchState_t* ms,
+FORCE_INLINE_TEMPLATE
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+size_t ZSTD_BtFindBestMatch( ZSTD_MatchState_t* ms,
const BYTE* const ip, const BYTE* const iLimit,
- size_t* offsetPtr,
+ size_t* offBasePtr,
const U32 mls /* template */,
const ZSTD_dictMode_e dictMode)
{
DEBUGLOG(7, "ZSTD_BtFindBestMatch");
if (ip < ms->window.base + ms->nextToUpdate) return 0; /* skipped area */
ZSTD_updateDUBT(ms, ip, iLimit, mls);
- return ZSTD_DUBT_findBestMatch(ms, ip, iLimit, offsetPtr, mls, dictMode);
+ return ZSTD_DUBT_findBestMatch(ms, ip, iLimit, offBasePtr, mls, dictMode);
}
/* *********************************
* Dedicated dict search
***********************************/
-void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const BYTE* const ip)
+void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_MatchState_t* ms, const BYTE* const ip)
{
const BYTE* const base = ms->window.base;
U32 const target = (U32)(ip - base);
@@ -514,7 +528,7 @@ void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const B
*/
FORCE_INLINE_TEMPLATE
size_t ZSTD_dedicatedDictSearch_lazy_search(size_t* offsetPtr, size_t ml, U32 nbAttempts,
- const ZSTD_matchState_t* const dms,
+ const ZSTD_MatchState_t* const dms,
const BYTE* const ip, const BYTE* const iLimit,
const BYTE* const prefixStart, const U32 curr,
const U32 dictLimit, const size_t ddsIdx) {
@@ -561,7 +575,7 @@ size_t ZSTD_dedicatedDictSearch_lazy_search(size_t* offsetPtr, size_t ml, U32 nb
/* save best solution */
if (currentMl > ml) {
ml = currentMl;
- *offsetPtr = STORE_OFFSET(curr - (matchIndex + ddsIndexDelta));
+ *offsetPtr = OFFSET_TO_OFFBASE(curr - (matchIndex + ddsIndexDelta));
if (ip+currentMl == iLimit) {
/* best possible, avoids read overflow on next attempt */
return ml;
@@ -598,7 +612,7 @@ size_t ZSTD_dedicatedDictSearch_lazy_search(size_t* offsetPtr, size_t ml, U32 nb
/* save best solution */
if (currentMl > ml) {
ml = currentMl;
- *offsetPtr = STORE_OFFSET(curr - (matchIndex + ddsIndexDelta));
+ *offsetPtr = OFFSET_TO_OFFBASE(curr - (matchIndex + ddsIndexDelta));
if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
}
}
@@ -614,10 +628,12 @@ size_t ZSTD_dedicatedDictSearch_lazy_search(size_t* offsetPtr, size_t ml, U32 nb
/* Update chains up to ip (excluded)
Assumption : always within prefix (i.e. not within extDict) */
-FORCE_INLINE_TEMPLATE U32 ZSTD_insertAndFindFirstIndex_internal(
- ZSTD_matchState_t* ms,
+FORCE_INLINE_TEMPLATE
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+U32 ZSTD_insertAndFindFirstIndex_internal(
+ ZSTD_MatchState_t* ms,
const ZSTD_compressionParameters* const cParams,
- const BYTE* ip, U32 const mls)
+ const BYTE* ip, U32 const mls, U32 const lazySkipping)
{
U32* const hashTable = ms->hashTable;
const U32 hashLog = cParams->hashLog;
@@ -632,21 +648,25 @@ FORCE_INLINE_TEMPLATE U32 ZSTD_insertAndFindFirstIndex_internal(
NEXT_IN_CHAIN(idx, chainMask) = hashTable[h];
hashTable[h] = idx;
idx++;
+ /* Stop inserting every position when in the lazy skipping mode. */
+ if (lazySkipping)
+ break;
}
ms->nextToUpdate = target;
return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];
}
-U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip) {
+U32 ZSTD_insertAndFindFirstIndex(ZSTD_MatchState_t* ms, const BYTE* ip) {
const ZSTD_compressionParameters* const cParams = &ms->cParams;
- return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, ms->cParams.minMatch);
+ return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, ms->cParams.minMatch, /* lazySkipping*/ 0);
}
/* inlining is important to hardwire a hot branch (template emulation) */
FORCE_INLINE_TEMPLATE
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_HcFindBestMatch(
- ZSTD_matchState_t* ms,
+ ZSTD_MatchState_t* ms,
const BYTE* const ip, const BYTE* const iLimit,
size_t* offsetPtr,
const U32 mls, const ZSTD_dictMode_e dictMode)
@@ -670,7 +690,7 @@ size_t ZSTD_HcFindBestMatch(
U32 nbAttempts = 1U << cParams->searchLog;
size_t ml=4-1;
- const ZSTD_matchState_t* const dms = ms->dictMatchState;
+ const ZSTD_MatchState_t* const dms = ms->dictMatchState;
const U32 ddsHashLog = dictMode == ZSTD_dedicatedDictSearch
? dms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG : 0;
const size_t ddsIdx = dictMode == ZSTD_dedicatedDictSearch
@@ -684,14 +704,15 @@ size_t ZSTD_HcFindBestMatch(
}
/* HC4 match finder */
- matchIndex = ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, mls);
+ matchIndex = ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, mls, ms->lazySkipping);
for ( ; (matchIndex>=lowLimit) & (nbAttempts>0) ; nbAttempts--) {
size_t currentMl=0;
if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
const BYTE* const match = base + matchIndex;
assert(matchIndex >= dictLimit); /* ensures this is true if dictMode != ZSTD_extDict */
- if (match[ml] == ip[ml]) /* potentially better */
+ /* read 4B starting from (match + ml + 1 - sizeof(U32)) */
+ if (MEM_read32(match + ml - 3) == MEM_read32(ip + ml - 3)) /* potentially better */
currentMl = ZSTD_count(ip, match, iLimit);
} else {
const BYTE* const match = dictBase + matchIndex;
@@ -703,7 +724,7 @@ size_t ZSTD_HcFindBestMatch(
/* save best solution */
if (currentMl > ml) {
ml = currentMl;
- *offsetPtr = STORE_OFFSET(curr - matchIndex);
+ *offsetPtr = OFFSET_TO_OFFBASE(curr - matchIndex);
if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
}
@@ -739,7 +760,7 @@ size_t ZSTD_HcFindBestMatch(
if (currentMl > ml) {
ml = currentMl;
assert(curr > matchIndex + dmsIndexDelta);
- *offsetPtr = STORE_OFFSET(curr - (matchIndex + dmsIndexDelta));
+ *offsetPtr = OFFSET_TO_OFFBASE(curr - (matchIndex + dmsIndexDelta));
if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
}
@@ -756,8 +777,6 @@ size_t ZSTD_HcFindBestMatch(
* (SIMD) Row-based matchfinder
***********************************/
/* Constants for row-based hash */
-#define ZSTD_ROW_HASH_TAG_OFFSET 16 /* byte offset of hashes in the match state's tagTable from the beginning of a row */
-#define ZSTD_ROW_HASH_TAG_BITS 8 /* nb bits to use for the tag */
#define ZSTD_ROW_HASH_TAG_MASK ((1u << ZSTD_ROW_HASH_TAG_BITS) - 1)
#define ZSTD_ROW_HASH_MAX_ENTRIES 64 /* absolute maximum number of entries per row, for all configurations */
@@ -769,64 +788,19 @@ typedef U64 ZSTD_VecMask; /* Clarifies when we are interacting with a U64 repr
* Starting from the LSB, returns the idx of the next non-zero bit.
* Basically counting the nb of trailing zeroes.
*/
-static U32 ZSTD_VecMask_next(ZSTD_VecMask val) {
- assert(val != 0);
-# if (defined(__GNUC__) && ((__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))
- if (sizeof(size_t) == 4) {
- U32 mostSignificantWord = (U32)(val >> 32);
- U32 leastSignificantWord = (U32)val;
- if (leastSignificantWord == 0) {
- return 32 + (U32)__builtin_ctz(mostSignificantWord);
- } else {
- return (U32)__builtin_ctz(leastSignificantWord);
- }
- } else {
- return (U32)__builtin_ctzll(val);
- }
-# else
- /* Software ctz version: http://aggregate.org/MAGIC/#Trailing%20Zero%20Count
- * and: https://stackoverflow.com/questions/2709430/count-number-of-bits-in-a-64-bit-long-big-integer
- */
- val = ~val & (val - 1ULL); /* Lowest set bit mask */
- val = val - ((val >> 1) & 0x5555555555555555);
- val = (val & 0x3333333333333333ULL) + ((val >> 2) & 0x3333333333333333ULL);
- return (U32)((((val + (val >> 4)) & 0xF0F0F0F0F0F0F0FULL) * 0x101010101010101ULL) >> 56);
-# endif
-}
-
-/* ZSTD_rotateRight_*():
- * Rotates a bitfield to the right by "count" bits.
- * https://en.wikipedia.org/w/index.php?title=Circular_shift&oldid=991635599#Implementing_circular_shifts
- */
-FORCE_INLINE_TEMPLATE
-U64 ZSTD_rotateRight_U64(U64 const value, U32 count) {
- assert(count < 64);
- count &= 0x3F; /* for fickle pattern recognition */
- return (value >> count) | (U64)(value << ((0U - count) & 0x3F));
-}
-
-FORCE_INLINE_TEMPLATE
-U32 ZSTD_rotateRight_U32(U32 const value, U32 count) {
- assert(count < 32);
- count &= 0x1F; /* for fickle pattern recognition */
- return (value >> count) | (U32)(value << ((0U - count) & 0x1F));
-}
-
-FORCE_INLINE_TEMPLATE
-U16 ZSTD_rotateRight_U16(U16 const value, U32 count) {
- assert(count < 16);
- count &= 0x0F; /* for fickle pattern recognition */
- return (value >> count) | (U16)(value << ((0U - count) & 0x0F));
+MEM_STATIC U32 ZSTD_VecMask_next(ZSTD_VecMask val) {
+ return ZSTD_countTrailingZeros64(val);
}
/* ZSTD_row_nextIndex():
* Returns the next index to insert at within a tagTable row, and updates the "head"
- * value to reflect the update. Essentially cycles backwards from [0, {entries per row})
+ * value to reflect the update. Essentially cycles backwards from [1, {entries per row})
*/
FORCE_INLINE_TEMPLATE U32 ZSTD_row_nextIndex(BYTE* const tagRow, U32 const rowMask) {
- U32 const next = (*tagRow - 1) & rowMask;
- *tagRow = (BYTE)next;
- return next;
+ U32 next = (*tagRow-1) & rowMask;
+ next += (next == 0) ? rowMask : 0; /* skip first position */
+ *tagRow = (BYTE)next;
+ return next;
}
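/* Minimal stand-alone demo (not from the patch) of the new cycling order. With
 * rowMask = 15 and the head starting at 0, the returned slots are 15, 14, ..., 1,
 * 15, 14, ...; slot 0 is skipped, since *tagRow (the row's first byte) now stores
 * the head value itself. Names below are illustrative. */
#include <stdio.h>

static unsigned next_index(unsigned char* head, unsigned rowMask)
{
    unsigned next = (unsigned)(*head - 1) & rowMask;
    next += (next == 0) ? rowMask : 0;   /* skip first position */
    *head = (unsigned char)next;
    return next;
}

int main(void)
{
    unsigned char head = 0;
    for (int i = 0; i < 20; i++)
        printf("%u ", next_index(&head, 15));
    printf("\n");
    return 0;
}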
/* ZSTD_isAligned():
@@ -840,7 +814,7 @@ MEM_STATIC int ZSTD_isAligned(void const* ptr, size_t align) {
/* ZSTD_row_prefetch():
* Performs prefetching for the hashTable and tagTable at a given row.
*/
-FORCE_INLINE_TEMPLATE void ZSTD_row_prefetch(U32 const* hashTable, U16 const* tagTable, U32 const relRow, U32 const rowLog) {
+FORCE_INLINE_TEMPLATE void ZSTD_row_prefetch(U32 const* hashTable, BYTE const* tagTable, U32 const relRow, U32 const rowLog) {
PREFETCH_L1(hashTable + relRow);
if (rowLog >= 5) {
PREFETCH_L1(hashTable + relRow + 16);
@@ -859,18 +833,20 @@ FORCE_INLINE_TEMPLATE void ZSTD_row_prefetch(U32 const* hashTable, U16 const* ta
* Fill up the hash cache starting at idx, prefetching up to ZSTD_ROW_HASH_CACHE_SIZE entries,
* but not beyond iLimit.
*/
-FORCE_INLINE_TEMPLATE void ZSTD_row_fillHashCache(ZSTD_matchState_t* ms, const BYTE* base,
+FORCE_INLINE_TEMPLATE
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+void ZSTD_row_fillHashCache(ZSTD_MatchState_t* ms, const BYTE* base,
U32 const rowLog, U32 const mls,
U32 idx, const BYTE* const iLimit)
{
U32 const* const hashTable = ms->hashTable;
- U16 const* const tagTable = ms->tagTable;
+ BYTE const* const tagTable = ms->tagTable;
U32 const hashLog = ms->rowHashLog;
U32 const maxElemsToPrefetch = (base + idx) > iLimit ? 0 : (U32)(iLimit - (base + idx) + 1);
U32 const lim = idx + MIN(ZSTD_ROW_HASH_CACHE_SIZE, maxElemsToPrefetch);
for (; idx < lim; ++idx) {
- U32 const hash = (U32)ZSTD_hashPtr(base + idx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls);
+ U32 const hash = (U32)ZSTD_hashPtrSalted(base + idx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, ms->hashSalt);
U32 const row = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
ZSTD_row_prefetch(hashTable, tagTable, row, rowLog);
ms->hashCache[idx & ZSTD_ROW_HASH_CACHE_MASK] = hash;
@@ -885,12 +861,15 @@ FORCE_INLINE_TEMPLATE void ZSTD_row_fillHashCache(ZSTD_matchState_t* ms, const B
* Returns the hash of base + idx, and replaces the hash in the hash cache with the byte at
* base + idx + ZSTD_ROW_HASH_CACHE_SIZE. Also prefetches the appropriate rows from hashTable and tagTable.
*/
-FORCE_INLINE_TEMPLATE U32 ZSTD_row_nextCachedHash(U32* cache, U32 const* hashTable,
- U16 const* tagTable, BYTE const* base,
+FORCE_INLINE_TEMPLATE
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+U32 ZSTD_row_nextCachedHash(U32* cache, U32 const* hashTable,
+ BYTE const* tagTable, BYTE const* base,
U32 idx, U32 const hashLog,
- U32 const rowLog, U32 const mls)
+ U32 const rowLog, U32 const mls,
+ U64 const hashSalt)
{
- U32 const newHash = (U32)ZSTD_hashPtr(base+idx+ZSTD_ROW_HASH_CACHE_SIZE, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls);
+ U32 const newHash = (U32)ZSTD_hashPtrSalted(base+idx+ZSTD_ROW_HASH_CACHE_SIZE, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, hashSalt);
U32 const row = (newHash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
ZSTD_row_prefetch(hashTable, tagTable, row, rowLog);
{ U32 const hash = cache[idx & ZSTD_ROW_HASH_CACHE_MASK];
@@ -902,28 +881,29 @@ FORCE_INLINE_TEMPLATE U32 ZSTD_row_nextCachedHash(U32* cache, U32 const* hashTab
/* ZSTD_row_update_internalImpl():
* Updates the hash table with positions starting from updateStartIdx until updateEndIdx.
*/
-FORCE_INLINE_TEMPLATE void ZSTD_row_update_internalImpl(ZSTD_matchState_t* ms,
- U32 updateStartIdx, U32 const updateEndIdx,
- U32 const mls, U32 const rowLog,
- U32 const rowMask, U32 const useCache)
+FORCE_INLINE_TEMPLATE
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+void ZSTD_row_update_internalImpl(ZSTD_MatchState_t* ms,
+ U32 updateStartIdx, U32 const updateEndIdx,
+ U32 const mls, U32 const rowLog,
+ U32 const rowMask, U32 const useCache)
{
U32* const hashTable = ms->hashTable;
- U16* const tagTable = ms->tagTable;
+ BYTE* const tagTable = ms->tagTable;
U32 const hashLog = ms->rowHashLog;
const BYTE* const base = ms->window.base;
DEBUGLOG(6, "ZSTD_row_update_internalImpl(): updateStartIdx=%u, updateEndIdx=%u", updateStartIdx, updateEndIdx);
for (; updateStartIdx < updateEndIdx; ++updateStartIdx) {
- U32 const hash = useCache ? ZSTD_row_nextCachedHash(ms->hashCache, hashTable, tagTable, base, updateStartIdx, hashLog, rowLog, mls)
- : (U32)ZSTD_hashPtr(base + updateStartIdx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls);
+ U32 const hash = useCache ? ZSTD_row_nextCachedHash(ms->hashCache, hashTable, tagTable, base, updateStartIdx, hashLog, rowLog, mls, ms->hashSalt)
+ : (U32)ZSTD_hashPtrSalted(base + updateStartIdx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, ms->hashSalt);
U32 const relRow = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
U32* const row = hashTable + relRow;
- BYTE* tagRow = (BYTE*)(tagTable + relRow); /* Though tagTable is laid out as a table of U16, each tag is only 1 byte.
- Explicit cast allows us to get exact desired position within each row */
+ BYTE* tagRow = tagTable + relRow;
U32 const pos = ZSTD_row_nextIndex(tagRow, rowMask);
- assert(hash == ZSTD_hashPtr(base + updateStartIdx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls));
- ((BYTE*)tagRow)[pos + ZSTD_ROW_HASH_TAG_OFFSET] = hash & ZSTD_ROW_HASH_TAG_MASK;
+ assert(hash == ZSTD_hashPtrSalted(base + updateStartIdx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, ms->hashSalt));
+ tagRow[pos] = hash & ZSTD_ROW_HASH_TAG_MASK;
row[pos] = updateStartIdx;
}
}
@@ -932,9 +912,11 @@ FORCE_INLINE_TEMPLATE void ZSTD_row_update_internalImpl(ZSTD_matchState_t* ms,
* Inserts the byte at ip into the appropriate position in the hash table, and updates ms->nextToUpdate.
* Skips sections of long matches as is necessary.
*/
-FORCE_INLINE_TEMPLATE void ZSTD_row_update_internal(ZSTD_matchState_t* ms, const BYTE* ip,
- U32 const mls, U32 const rowLog,
- U32 const rowMask, U32 const useCache)
+FORCE_INLINE_TEMPLATE
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+void ZSTD_row_update_internal(ZSTD_MatchState_t* ms, const BYTE* ip,
+ U32 const mls, U32 const rowLog,
+ U32 const rowMask, U32 const useCache)
{
U32 idx = ms->nextToUpdate;
const BYTE* const base = ms->window.base;
@@ -965,13 +947,41 @@ FORCE_INLINE_TEMPLATE void ZSTD_row_update_internal(ZSTD_matchState_t* ms, const
* External wrapper for ZSTD_row_update_internal(). Used for filling the hashtable during dictionary
* processing.
*/
-void ZSTD_row_update(ZSTD_matchState_t* const ms, const BYTE* ip) {
+void ZSTD_row_update(ZSTD_MatchState_t* const ms, const BYTE* ip) {
const U32 rowLog = BOUNDED(4, ms->cParams.searchLog, 6);
const U32 rowMask = (1u << rowLog) - 1;
const U32 mls = MIN(ms->cParams.minMatch, 6 /* mls caps out at 6 */);
DEBUGLOG(5, "ZSTD_row_update(), rowLog=%u", rowLog);
- ZSTD_row_update_internal(ms, ip, mls, rowLog, rowMask, 0 /* dont use cache */);
+ ZSTD_row_update_internal(ms, ip, mls, rowLog, rowMask, 0 /* don't use cache */);
+}
+
+/* Returns the width (in bits) of the groups within the match mask: each matching
+ * entry is reported as a group of this many bits set to 1. Since not all
+ * architectures have an easy movemask instruction, grouping makes iterating
+ * over the bits of the mask easier and faster.
+ */
+FORCE_INLINE_TEMPLATE U32
+ZSTD_row_matchMaskGroupWidth(const U32 rowEntries)
+{
+ assert((rowEntries == 16) || (rowEntries == 32) || rowEntries == 64);
+ assert(rowEntries <= ZSTD_ROW_HASH_MAX_ENTRIES);
+ (void)rowEntries;
+#if defined(ZSTD_ARCH_ARM_NEON)
+ /* NEON path only works for little endian */
+ if (!MEM_isLittleEndian()) {
+ return 1;
+ }
+ if (rowEntries == 16) {
+ return 4;
+ }
+ if (rowEntries == 32) {
+ return 2;
+ }
+ if (rowEntries == 64) {
+ return 1;
+ }
+#endif
+ return 1;
}
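/* Illustrative sketch (not from the patch): one way such a grouped match mask can be
 * consumed, assuming at most one bit is set per group (as the NEON paths below arrange
 * with their final AND masks). The entry number is the bit position divided by the
 * group width. ctz64() is a portable stand-in for ZSTD_countTrailingZeros64(); the
 * real search loop in zstd differs in detail. */
#include <stdint.h>

static inline unsigned ctz64(uint64_t v)
{
    unsigned n = 0;
    while ((v & 1) == 0) { v >>= 1; n++; }  /* portable fallback, assumes v != 0 */
    return n;
}

/* Visit every matching entry encoded in `mask`, lowest entry first. */
static void for_each_match(uint64_t mask, unsigned groupWidth,
                           void (*visit)(unsigned entry, void* ctx), void* ctx)
{
    for (; mask != 0; mask &= mask - 1) {   /* clear the lowest set bit each round */
        unsigned const entry = ctz64(mask) / groupWidth;
        visit(entry, ctx);
    }
}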
#if defined(ZSTD_ARCH_X86_SSE2)
@@ -994,71 +1004,82 @@ ZSTD_row_getSSEMask(int nbChunks, const BYTE* const src, const BYTE tag, const U
}
#endif
-/* Returns a ZSTD_VecMask (U32) that has the nth bit set to 1 if the newly-computed "tag" matches
- * the hash at the nth position in a row of the tagTable.
- * Each row is a circular buffer beginning at the value of "head". So we must rotate the "matches" bitfield
- * to match up with the actual layout of the entries within the hashTable */
+#if defined(ZSTD_ARCH_ARM_NEON)
+FORCE_INLINE_TEMPLATE ZSTD_VecMask
+ZSTD_row_getNEONMask(const U32 rowEntries, const BYTE* const src, const BYTE tag, const U32 headGrouped)
+{
+ assert((rowEntries == 16) || (rowEntries == 32) || rowEntries == 64);
+ if (rowEntries == 16) {
+ /* vshrn_n_u16 shifts each u16 right by 4 and narrows it to its low 8 bits.
+ * After that, each group of 4 bits represents the equalMask. We clear
+ * all bits except the highest one in each group by ANDing with
+ * 0x88 = 0b10001000.
+ */
+ const uint8x16_t chunk = vld1q_u8(src);
+ const uint16x8_t equalMask = vreinterpretq_u16_u8(vceqq_u8(chunk, vdupq_n_u8(tag)));
+ const uint8x8_t res = vshrn_n_u16(equalMask, 4);
+ const U64 matches = vget_lane_u64(vreinterpret_u64_u8(res), 0);
+ return ZSTD_rotateRight_U64(matches, headGrouped) & 0x8888888888888888ull;
+ } else if (rowEntries == 32) {
+ /* Same idea as with rowEntries == 16 but doing AND with
+ * 0x55 = 0b01010101.
+ */
+ const uint16x8x2_t chunk = vld2q_u16((const uint16_t*)(const void*)src);
+ const uint8x16_t chunk0 = vreinterpretq_u8_u16(chunk.val[0]);
+ const uint8x16_t chunk1 = vreinterpretq_u8_u16(chunk.val[1]);
+ const uint8x16_t dup = vdupq_n_u8(tag);
+ const uint8x8_t t0 = vshrn_n_u16(vreinterpretq_u16_u8(vceqq_u8(chunk0, dup)), 6);
+ const uint8x8_t t1 = vshrn_n_u16(vreinterpretq_u16_u8(vceqq_u8(chunk1, dup)), 6);
+ const uint8x8_t res = vsli_n_u8(t0, t1, 4);
+ const U64 matches = vget_lane_u64(vreinterpret_u64_u8(res), 0) ;
+ return ZSTD_rotateRight_U64(matches, headGrouped) & 0x5555555555555555ull;
+ } else { /* rowEntries == 64 */
+ const uint8x16x4_t chunk = vld4q_u8(src);
+ const uint8x16_t dup = vdupq_n_u8(tag);
+ const uint8x16_t cmp0 = vceqq_u8(chunk.val[0], dup);
+ const uint8x16_t cmp1 = vceqq_u8(chunk.val[1], dup);
+ const uint8x16_t cmp2 = vceqq_u8(chunk.val[2], dup);
+ const uint8x16_t cmp3 = vceqq_u8(chunk.val[3], dup);
+
+ const uint8x16_t t0 = vsriq_n_u8(cmp1, cmp0, 1);
+ const uint8x16_t t1 = vsriq_n_u8(cmp3, cmp2, 1);
+ const uint8x16_t t2 = vsriq_n_u8(t1, t0, 2);
+ const uint8x16_t t3 = vsriq_n_u8(t2, t2, 4);
+ const uint8x8_t t4 = vshrn_n_u16(vreinterpretq_u16_u8(t3), 4);
+ const U64 matches = vget_lane_u64(vreinterpret_u64_u8(t4), 0);
+ return ZSTD_rotateRight_U64(matches, headGrouped);
+ }
+}
+#endif
+
+/* Returns a ZSTD_VecMask (U64) that has the nth group (determined by
+ * ZSTD_row_matchMaskGroupWidth) of bits set to 1 if the newly-computed "tag"
+ * matches the hash at the nth position in a row of the tagTable.
+ * Each row is a circular buffer beginning at the value of "headGrouped". So we
+ * must rotate the "matches" bitfield to match up with the actual layout of the
+ * entries within the hashTable */
FORCE_INLINE_TEMPLATE ZSTD_VecMask
-ZSTD_row_getMatchMask(const BYTE* const tagRow, const BYTE tag, const U32 head, const U32 rowEntries)
+ZSTD_row_getMatchMask(const BYTE* const tagRow, const BYTE tag, const U32 headGrouped, const U32 rowEntries)
{
- const BYTE* const src = tagRow + ZSTD_ROW_HASH_TAG_OFFSET;
+ const BYTE* const src = tagRow;
assert((rowEntries == 16) || (rowEntries == 32) || rowEntries == 64);
assert(rowEntries <= ZSTD_ROW_HASH_MAX_ENTRIES);
+ assert(ZSTD_row_matchMaskGroupWidth(rowEntries) * rowEntries <= sizeof(ZSTD_VecMask) * 8);
#if defined(ZSTD_ARCH_X86_SSE2)
- return ZSTD_row_getSSEMask(rowEntries / 16, src, tag, head);
+ return ZSTD_row_getSSEMask(rowEntries / 16, src, tag, headGrouped);
#else /* SW or NEON-LE */
# if defined(ZSTD_ARCH_ARM_NEON)
/* This NEON path only works for little endian - otherwise use SWAR below */
if (MEM_isLittleEndian()) {
- if (rowEntries == 16) {
- const uint8x16_t chunk = vld1q_u8(src);
- const uint16x8_t equalMask = vreinterpretq_u16_u8(vceqq_u8(chunk, vdupq_n_u8(tag)));
- const uint16x8_t t0 = vshlq_n_u16(equalMask, 7);
- const uint32x4_t t1 = vreinterpretq_u32_u16(vsriq_n_u16(t0, t0, 14));
- const uint64x2_t t2 = vreinterpretq_u64_u32(vshrq_n_u32(t1, 14));
- const uint8x16_t t3 = vreinterpretq_u8_u64(vsraq_n_u64(t2, t2, 28));
- const U16 hi = (U16)vgetq_lane_u8(t3, 8);
- const U16 lo = (U16)vgetq_lane_u8(t3, 0);
- return ZSTD_rotateRight_U16((hi << 8) | lo, head);
- } else if (rowEntries == 32) {
- const uint16x8x2_t chunk = vld2q_u16((const U16*)(const void*)src);
- const uint8x16_t chunk0 = vreinterpretq_u8_u16(chunk.val[0]);
- const uint8x16_t chunk1 = vreinterpretq_u8_u16(chunk.val[1]);
- const uint8x16_t equalMask0 = vceqq_u8(chunk0, vdupq_n_u8(tag));
- const uint8x16_t equalMask1 = vceqq_u8(chunk1, vdupq_n_u8(tag));
- const int8x8_t pack0 = vqmovn_s16(vreinterpretq_s16_u8(equalMask0));
- const int8x8_t pack1 = vqmovn_s16(vreinterpretq_s16_u8(equalMask1));
- const uint8x8_t t0 = vreinterpret_u8_s8(pack0);
- const uint8x8_t t1 = vreinterpret_u8_s8(pack1);
- const uint8x8_t t2 = vsri_n_u8(t1, t0, 2);
- const uint8x8x2_t t3 = vuzp_u8(t2, t0);
- const uint8x8_t t4 = vsri_n_u8(t3.val[1], t3.val[0], 4);
- const U32 matches = vget_lane_u32(vreinterpret_u32_u8(t4), 0);
- return ZSTD_rotateRight_U32(matches, head);
- } else { /* rowEntries == 64 */
- const uint8x16x4_t chunk = vld4q_u8(src);
- const uint8x16_t dup = vdupq_n_u8(tag);
- const uint8x16_t cmp0 = vceqq_u8(chunk.val[0], dup);
- const uint8x16_t cmp1 = vceqq_u8(chunk.val[1], dup);
- const uint8x16_t cmp2 = vceqq_u8(chunk.val[2], dup);
- const uint8x16_t cmp3 = vceqq_u8(chunk.val[3], dup);
-
- const uint8x16_t t0 = vsriq_n_u8(cmp1, cmp0, 1);
- const uint8x16_t t1 = vsriq_n_u8(cmp3, cmp2, 1);
- const uint8x16_t t2 = vsriq_n_u8(t1, t0, 2);
- const uint8x16_t t3 = vsriq_n_u8(t2, t2, 4);
- const uint8x8_t t4 = vshrn_n_u16(vreinterpretq_u16_u8(t3), 4);
- const U64 matches = vget_lane_u64(vreinterpret_u64_u8(t4), 0);
- return ZSTD_rotateRight_U64(matches, head);
- }
+ return ZSTD_row_getNEONMask(rowEntries, src, tag, headGrouped);
}
# endif /* ZSTD_ARCH_ARM_NEON */
/* SWAR */
{ const size_t chunkSize = sizeof(size_t);
const size_t shiftAmount = ((chunkSize * 8) - chunkSize);
const size_t xFF = ~((size_t)0);
const size_t x01 = xFF / 0xFF;
@@ -1091,11 +1112,11 @@ ZSTD_row_getMatchMask(const BYTE* const tagRow, const BYTE tag, const U32 head,
}
matches = ~matches;
if (rowEntries == 16) {
- return ZSTD_rotateRight_U16((U16)matches, head);
+ return ZSTD_rotateRight_U16((U16)matches, headGrouped);
} else if (rowEntries == 32) {
- return ZSTD_rotateRight_U32((U32)matches, head);
+ return ZSTD_rotateRight_U32((U32)matches, headGrouped);
} else {
- return ZSTD_rotateRight_U64((U64)matches, head);
+ return ZSTD_rotateRight_U64((U64)matches, headGrouped);
}
}
#endif
@@ -1103,29 +1124,30 @@ ZSTD_row_getMatchMask(const BYTE* const tagRow, const BYTE tag, const U32 head,
/* The high-level approach of the SIMD row based match finder is as follows:
* - Figure out where to insert the new entry:
- * - Generate a hash from a byte along with an additional 1-byte "short hash". The additional byte is our "tag"
- * - The hashTable is effectively split into groups or "rows" of 16 or 32 entries of U32, and the hash determines
+ * - Generate a hash for the current input position and split it into one byte of tag and `rowHashLog` bits of index.
+ * - The hash is salted by a value that changes on every context reset, so that when the same table is reused
+ * we avoid collisions that would otherwise slow us down by introducing phantom matches.
+ * - The hashTable is effectively split into groups or "rows" of 15 or 31 entries of U32, and the index determines
* which row to insert into.
- * - Determine the correct position within the row to insert the entry into. Each row of 16 or 32 can
- * be considered as a circular buffer with a "head" index that resides in the tagTable.
- * - Also insert the "tag" into the equivalent row and position in the tagTable.
- * - Note: The tagTable has 17 or 33 1-byte entries per row, due to 16 or 32 tags, and 1 "head" entry.
- * The 17 or 33 entry rows are spaced out to occur every 32 or 64 bytes, respectively,
- * for alignment/performance reasons, leaving some bytes unused.
- * - Use SIMD to efficiently compare the tags in the tagTable to the 1-byte "short hash" and
+ * - Determine the correct position within the row to insert the entry into. Each row of 15 or 31 can
+ * be considered as a circular buffer with a "head" index that resides in the tagTable (overall 16 or 32 bytes
+ * per row).
+ * - Use SIMD to efficiently compare the tags in the tagTable to the 1-byte tag calculated for the position and
* generate a bitfield that we can cycle through to check the collisions in the hash table.
* - Pick the longest match.
+ * - Insert the tag into the equivalent row and position in the tagTable.
*/
FORCE_INLINE_TEMPLATE
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_RowFindBestMatch(
- ZSTD_matchState_t* ms,
+ ZSTD_MatchState_t* ms,
const BYTE* const ip, const BYTE* const iLimit,
size_t* offsetPtr,
const U32 mls, const ZSTD_dictMode_e dictMode,
const U32 rowLog)
{
U32* const hashTable = ms->hashTable;
- U16* const tagTable = ms->tagTable;
+ BYTE* const tagTable = ms->tagTable;
U32* const hashCache = ms->hashCache;
const U32 hashLog = ms->rowHashLog;
const ZSTD_compressionParameters* const cParams = &ms->cParams;
@@ -1143,11 +1165,14 @@ size_t ZSTD_RowFindBestMatch(
const U32 rowEntries = (1U << rowLog);
const U32 rowMask = rowEntries - 1;
const U32 cappedSearchLog = MIN(cParams->searchLog, rowLog); /* nb of searches is capped at nb entries per row */
+ const U32 groupWidth = ZSTD_row_matchMaskGroupWidth(rowEntries);
+ const U64 hashSalt = ms->hashSalt;
U32 nbAttempts = 1U << cappedSearchLog;
size_t ml=4-1;
+ U32 hash;
/* DMS/DDS variables that may be referenced later */
- const ZSTD_matchState_t* const dms = ms->dictMatchState;
+ const ZSTD_MatchState_t* const dms = ms->dictMatchState;
/* Initialize the following variables to satisfy static analyzer */
size_t ddsIdx = 0;
@@ -1168,7 +1193,7 @@ size_t ZSTD_RowFindBestMatch(
if (dictMode == ZSTD_dictMatchState) {
/* Prefetch DMS rows */
U32* const dmsHashTable = dms->hashTable;
- U16* const dmsTagTable = dms->tagTable;
+ BYTE* const dmsTagTable = dms->tagTable;
U32 const dmsHash = (U32)ZSTD_hashPtr(ip, dms->rowHashLog + ZSTD_ROW_HASH_TAG_BITS, mls);
U32 const dmsRelRow = (dmsHash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
dmsTag = dmsHash & ZSTD_ROW_HASH_TAG_MASK;
@@ -1178,23 +1203,34 @@ size_t ZSTD_RowFindBestMatch(
}
/* Update the hashTable and tagTable up to (but not including) ip */
- ZSTD_row_update_internal(ms, ip, mls, rowLog, rowMask, 1 /* useCache */);
+ if (!ms->lazySkipping) {
+ ZSTD_row_update_internal(ms, ip, mls, rowLog, rowMask, 1 /* useCache */);
+ hash = ZSTD_row_nextCachedHash(hashCache, hashTable, tagTable, base, curr, hashLog, rowLog, mls, hashSalt);
+ } else {
+ /* Stop inserting every position when in the lazy skipping mode.
+ * The hash cache is also not kept up to date in this mode.
+ */
+ hash = (U32)ZSTD_hashPtrSalted(ip, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, hashSalt);
+ ms->nextToUpdate = curr;
+ }
+ ms->hashSaltEntropy += hash; /* collect salt entropy */
+
{ /* Get the hash for ip, compute the appropriate row */
- U32 const hash = ZSTD_row_nextCachedHash(hashCache, hashTable, tagTable, base, curr, hashLog, rowLog, mls);
U32 const relRow = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
U32 const tag = hash & ZSTD_ROW_HASH_TAG_MASK;
U32* const row = hashTable + relRow;
BYTE* tagRow = (BYTE*)(tagTable + relRow);
- U32 const head = *tagRow & rowMask;
+ U32 const headGrouped = (*tagRow & rowMask) * groupWidth;
U32 matchBuffer[ZSTD_ROW_HASH_MAX_ENTRIES];
size_t numMatches = 0;
size_t currMatch = 0;
- ZSTD_VecMask matches = ZSTD_row_getMatchMask(tagRow, (BYTE)tag, head, rowEntries);
+ ZSTD_VecMask matches = ZSTD_row_getMatchMask(tagRow, (BYTE)tag, headGrouped, rowEntries);
/* Cycle through the matches and prefetch */
- for (; (matches > 0) && (nbAttempts > 0); --nbAttempts, matches &= (matches - 1)) {
- U32 const matchPos = (head + ZSTD_VecMask_next(matches)) & rowMask;
+ for (; (matches > 0) && (nbAttempts > 0); matches &= (matches - 1)) {
+ U32 const matchPos = ((headGrouped + ZSTD_VecMask_next(matches)) / groupWidth) & rowMask;
U32 const matchIndex = row[matchPos];
+ if(matchPos == 0) continue;
assert(numMatches < rowEntries);
if (matchIndex < lowLimit)
break;
@@ -1204,13 +1240,14 @@ size_t ZSTD_RowFindBestMatch(
PREFETCH_L1(dictBase + matchIndex);
}
matchBuffer[numMatches++] = matchIndex;
+ --nbAttempts;
}
/* Speed opt: insert current byte into hashtable too. This allows us to avoid one iteration of the loop
in ZSTD_row_update_internal() at the next search. */
{
U32 const pos = ZSTD_row_nextIndex(tagRow, rowMask);
- tagRow[pos + ZSTD_ROW_HASH_TAG_OFFSET] = (BYTE)tag;
+ tagRow[pos] = (BYTE)tag;
row[pos] = ms->nextToUpdate++;
}
@@ -1224,7 +1261,8 @@ size_t ZSTD_RowFindBestMatch(
if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
const BYTE* const match = base + matchIndex;
assert(matchIndex >= dictLimit); /* ensures this is true if dictMode != ZSTD_extDict */
- if (match[ml] == ip[ml]) /* potentially better */
+ /* read 4B starting from (match + ml + 1 - sizeof(U32)) */
+ if (MEM_read32(match + ml - 3) == MEM_read32(ip + ml - 3)) /* potentially better */
currentMl = ZSTD_count(ip, match, iLimit);
} else {
const BYTE* const match = dictBase + matchIndex;
@@ -1236,7 +1274,7 @@ size_t ZSTD_RowFindBestMatch(
/* Save best solution */
if (currentMl > ml) {
ml = currentMl;
- *offsetPtr = STORE_OFFSET(curr - matchIndex);
+ *offsetPtr = OFFSET_TO_OFFBASE(curr - matchIndex);
if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
}
}
@@ -1254,19 +1292,21 @@ size_t ZSTD_RowFindBestMatch(
const U32 dmsSize = (U32)(dmsEnd - dmsBase);
const U32 dmsIndexDelta = dictLimit - dmsSize;
- { U32 const head = *dmsTagRow & rowMask;
+ { U32 const headGrouped = (*dmsTagRow & rowMask) * groupWidth;
U32 matchBuffer[ZSTD_ROW_HASH_MAX_ENTRIES];
size_t numMatches = 0;
size_t currMatch = 0;
- ZSTD_VecMask matches = ZSTD_row_getMatchMask(dmsTagRow, (BYTE)dmsTag, head, rowEntries);
+ ZSTD_VecMask matches = ZSTD_row_getMatchMask(dmsTagRow, (BYTE)dmsTag, headGrouped, rowEntries);
- for (; (matches > 0) && (nbAttempts > 0); --nbAttempts, matches &= (matches - 1)) {
- U32 const matchPos = (head + ZSTD_VecMask_next(matches)) & rowMask;
+ for (; (matches > 0) && (nbAttempts > 0); matches &= (matches - 1)) {
+ U32 const matchPos = ((headGrouped + ZSTD_VecMask_next(matches)) / groupWidth) & rowMask;
U32 const matchIndex = dmsRow[matchPos];
+ if(matchPos == 0) continue;
if (matchIndex < dmsLowestIndex)
break;
PREFETCH_L1(dmsBase + matchIndex);
matchBuffer[numMatches++] = matchIndex;
+ --nbAttempts;
}
/* Return the longest match */
@@ -1285,7 +1325,7 @@ size_t ZSTD_RowFindBestMatch(
if (currentMl > ml) {
ml = currentMl;
assert(curr > matchIndex + dmsIndexDelta);
- *offsetPtr = STORE_OFFSET(curr - (matchIndex + dmsIndexDelta));
+ *offsetPtr = OFFSET_TO_OFFBASE(curr - (matchIndex + dmsIndexDelta));
if (ip+currentMl == iLimit) break;
}
}
@@ -1301,7 +1341,7 @@ size_t ZSTD_RowFindBestMatch(
* ZSTD_searchMax() dispatches to the correct implementation function.
*
* TODO: The start of the search function involves loading and calculating a
- * bunch of constants from the ZSTD_matchState_t. These computations could be
+ * bunch of constants from the ZSTD_MatchState_t. These computations could be
* done in an initialization function, and saved somewhere in the match state.
* Then we could pass a pointer to the saved state instead of the match state,
* and avoid duplicate computations.
@@ -1325,7 +1365,7 @@ size_t ZSTD_RowFindBestMatch(
#define GEN_ZSTD_BT_SEARCH_FN(dictMode, mls) \
ZSTD_SEARCH_FN_ATTRS size_t ZSTD_BT_SEARCH_FN(dictMode, mls)( \
- ZSTD_matchState_t* ms, \
+ ZSTD_MatchState_t* ms, \
const BYTE* ip, const BYTE* const iLimit, \
size_t* offBasePtr) \
{ \
@@ -1335,7 +1375,7 @@ size_t ZSTD_RowFindBestMatch(
#define GEN_ZSTD_HC_SEARCH_FN(dictMode, mls) \
ZSTD_SEARCH_FN_ATTRS size_t ZSTD_HC_SEARCH_FN(dictMode, mls)( \
- ZSTD_matchState_t* ms, \
+ ZSTD_MatchState_t* ms, \
const BYTE* ip, const BYTE* const iLimit, \
size_t* offsetPtr) \
{ \
@@ -1345,7 +1385,7 @@ size_t ZSTD_RowFindBestMatch(
#define GEN_ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog) \
ZSTD_SEARCH_FN_ATTRS size_t ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog)( \
- ZSTD_matchState_t* ms, \
+ ZSTD_MatchState_t* ms, \
const BYTE* ip, const BYTE* const iLimit, \
size_t* offsetPtr) \
{ \
@@ -1446,7 +1486,7 @@ typedef enum { search_hashChain=0, search_binaryTree=1, search_rowHash=2 } searc
* If a match is found its offset is stored in @p offsetPtr.
*/
FORCE_INLINE_TEMPLATE size_t ZSTD_searchMax(
- ZSTD_matchState_t* ms,
+ ZSTD_MatchState_t* ms,
const BYTE* ip,
const BYTE* iend,
size_t* offsetPtr,
@@ -1472,9 +1512,10 @@ FORCE_INLINE_TEMPLATE size_t ZSTD_searchMax(
* Common parser - lazy strategy
*********************************/
-FORCE_INLINE_TEMPLATE size_t
-ZSTD_compressBlock_lazy_generic(
- ZSTD_matchState_t* ms, seqStore_t* seqStore,
+FORCE_INLINE_TEMPLATE
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+size_t ZSTD_compressBlock_lazy_generic(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore,
U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize,
const searchMethod_e searchMethod, const U32 depth,
@@ -1491,12 +1532,13 @@ ZSTD_compressBlock_lazy_generic(
const U32 mls = BOUNDED(4, ms->cParams.minMatch, 6);
const U32 rowLog = BOUNDED(4, ms->cParams.searchLog, 6);
- U32 offset_1 = rep[0], offset_2 = rep[1], savedOffset=0;
+ U32 offset_1 = rep[0], offset_2 = rep[1];
+ U32 offsetSaved1 = 0, offsetSaved2 = 0;
const int isDMS = dictMode == ZSTD_dictMatchState;
const int isDDS = dictMode == ZSTD_dedicatedDictSearch;
const int isDxS = isDMS || isDDS;
- const ZSTD_matchState_t* const dms = ms->dictMatchState;
+ const ZSTD_MatchState_t* const dms = ms->dictMatchState;
const U32 dictLowestIndex = isDxS ? dms->window.dictLimit : 0;
const BYTE* const dictBase = isDxS ? dms->window.base : NULL;
const BYTE* const dictLowest = isDxS ? dictBase + dictLowestIndex : NULL;
@@ -1512,8 +1554,8 @@ ZSTD_compressBlock_lazy_generic(
U32 const curr = (U32)(ip - base);
U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, ms->cParams.windowLog);
U32 const maxRep = curr - windowLow;
- if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0;
- if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0;
+ if (offset_2 > maxRep) offsetSaved2 = offset_2, offset_2 = 0;
+ if (offset_1 > maxRep) offsetSaved1 = offset_1, offset_1 = 0;
}
if (isDxS) {
/* dictMatchState repCode checks don't currently handle repCode == 0
@@ -1522,10 +1564,11 @@ ZSTD_compressBlock_lazy_generic(
assert(offset_2 <= dictAndPrefixLength);
}
+ /* Reset the lazy skipping state */
+ ms->lazySkipping = 0;
+
if (searchMethod == search_rowHash) {
- ZSTD_row_fillHashCache(ms, base, rowLog,
- MIN(ms->cParams.minMatch, 6 /* mls caps out at 6 */),
- ms->nextToUpdate, ilimit);
+ ZSTD_row_fillHashCache(ms, base, rowLog, mls, ms->nextToUpdate, ilimit);
}
/* Match Loop */
@@ -1537,7 +1580,7 @@ ZSTD_compressBlock_lazy_generic(
#endif
while (ip < ilimit) {
size_t matchLength=0;
- size_t offcode=STORE_REPCODE_1;
+ size_t offBase = REPCODE1_TO_OFFBASE;
const BYTE* start=ip+1;
DEBUGLOG(7, "search baseline (depth 0)");
@@ -1548,7 +1591,7 @@ ZSTD_compressBlock_lazy_generic(
&& repIndex < prefixLowestIndex) ?
dictBase + (repIndex - dictIndexDelta) :
base + repIndex;
- if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
+ if ((ZSTD_index_overlap_check(prefixLowestIndex, repIndex))
&& (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
@@ -1562,14 +1605,23 @@ ZSTD_compressBlock_lazy_generic(
}
/* first search (depth 0) */
- { size_t offsetFound = 999999999;
- size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &offsetFound, mls, rowLog, searchMethod, dictMode);
+ { size_t offbaseFound = 999999999;
+ size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &offbaseFound, mls, rowLog, searchMethod, dictMode);
if (ml2 > matchLength)
- matchLength = ml2, start = ip, offcode=offsetFound;
+ matchLength = ml2, start = ip, offBase = offbaseFound;
}
if (matchLength < 4) {
- ip += ((ip-anchor) >> kSearchStrength) + 1; /* jump faster over incompressible sections */
+ size_t const step = ((size_t)(ip-anchor) >> kSearchStrength) + 1; /* jump faster over incompressible sections */
+ ip += step;
+ /* Enter the lazy skipping mode once we are skipping more than 8 bytes at a time.
+ * In this mode we stop inserting every position into our tables, and only insert
+ * positions that we search, which is one in step positions.
+ * The exact cutoff is flexible, I've just chosen a number that is reasonably high,
+ * so we minimize the compression ratio loss in "normal" scenarios. This mode gets
+ * triggered once we've gone 2KB without finding any matches.
+ */
+ ms->lazySkipping = step > kLazySkippingStep;
continue;
}
@@ -1579,34 +1631,34 @@ ZSTD_compressBlock_lazy_generic(
DEBUGLOG(7, "search depth 1");
ip ++;
if ( (dictMode == ZSTD_noDict)
- && (offcode) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
+ && (offBase) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
int const gain2 = (int)(mlRep * 3);
- int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1);
+ int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offBase) + 1);
if ((mlRep >= 4) && (gain2 > gain1))
- matchLength = mlRep, offcode = STORE_REPCODE_1, start = ip;
+ matchLength = mlRep, offBase = REPCODE1_TO_OFFBASE, start = ip;
}
if (isDxS) {
const U32 repIndex = (U32)(ip - base) - offset_1;
const BYTE* repMatch = repIndex < prefixLowestIndex ?
dictBase + (repIndex - dictIndexDelta) :
base + repIndex;
- if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
+ if ((ZSTD_index_overlap_check(prefixLowestIndex, repIndex))
&& (MEM_read32(repMatch) == MEM_read32(ip)) ) {
const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
int const gain2 = (int)(mlRep * 3);
- int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1);
+ int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offBase) + 1);
if ((mlRep >= 4) && (gain2 > gain1))
- matchLength = mlRep, offcode = STORE_REPCODE_1, start = ip;
+ matchLength = mlRep, offBase = REPCODE1_TO_OFFBASE, start = ip;
}
}
- { size_t offset2=999999999;
- size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &offset2, mls, rowLog, searchMethod, dictMode);
- int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offset2))); /* raw approx */
- int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 4);
+ { size_t ofbCandidate=999999999;
+ size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, dictMode);
+ int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)ofbCandidate)); /* raw approx */
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 4);
if ((ml2 >= 4) && (gain2 > gain1)) {
- matchLength = ml2, offcode = offset2, start = ip;
+ matchLength = ml2, offBase = ofbCandidate, start = ip;
continue; /* search a better one */
} }
@@ -1615,34 +1667,34 @@ ZSTD_compressBlock_lazy_generic(
DEBUGLOG(7, "search depth 2");
ip ++;
if ( (dictMode == ZSTD_noDict)
- && (offcode) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
+ && (offBase) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
int const gain2 = (int)(mlRep * 4);
- int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1);
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 1);
if ((mlRep >= 4) && (gain2 > gain1))
- matchLength = mlRep, offcode = STORE_REPCODE_1, start = ip;
+ matchLength = mlRep, offBase = REPCODE1_TO_OFFBASE, start = ip;
}
if (isDxS) {
const U32 repIndex = (U32)(ip - base) - offset_1;
const BYTE* repMatch = repIndex < prefixLowestIndex ?
dictBase + (repIndex - dictIndexDelta) :
base + repIndex;
- if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
+ if ((ZSTD_index_overlap_check(prefixLowestIndex, repIndex))
&& (MEM_read32(repMatch) == MEM_read32(ip)) ) {
const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
int const gain2 = (int)(mlRep * 4);
- int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1);
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 1);
if ((mlRep >= 4) && (gain2 > gain1))
- matchLength = mlRep, offcode = STORE_REPCODE_1, start = ip;
+ matchLength = mlRep, offBase = REPCODE1_TO_OFFBASE, start = ip;
}
}
- { size_t offset2=999999999;
- size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &offset2, mls, rowLog, searchMethod, dictMode);
- int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offset2))); /* raw approx */
- int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 7);
+ { size_t ofbCandidate=999999999;
+ size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, dictMode);
+ int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)ofbCandidate)); /* raw approx */
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 7);
if ((ml2 >= 4) && (gain2 > gain1)) {
- matchLength = ml2, offcode = offset2, start = ip;
+ matchLength = ml2, offBase = ofbCandidate, start = ip;
continue;
} } }
break; /* nothing found : store previous solution */
@@ -1653,26 +1705,33 @@ ZSTD_compressBlock_lazy_generic(
* notably if `value` is unsigned, resulting in a large positive `-value`.
*/
/* catch up */
- if (STORED_IS_OFFSET(offcode)) {
+ if (OFFBASE_IS_OFFSET(offBase)) {
if (dictMode == ZSTD_noDict) {
- while ( ((start > anchor) & (start - STORED_OFFSET(offcode) > prefixLowest))
- && (start[-1] == (start-STORED_OFFSET(offcode))[-1]) ) /* only search for offset within prefix */
+ while ( ((start > anchor) & (start - OFFBASE_TO_OFFSET(offBase) > prefixLowest))
+ && (start[-1] == (start-OFFBASE_TO_OFFSET(offBase))[-1]) ) /* only search for offset within prefix */
{ start--; matchLength++; }
}
if (isDxS) {
- U32 const matchIndex = (U32)((size_t)(start-base) - STORED_OFFSET(offcode));
+ U32 const matchIndex = (U32)((size_t)(start-base) - OFFBASE_TO_OFFSET(offBase));
const BYTE* match = (matchIndex < prefixLowestIndex) ? dictBase + matchIndex - dictIndexDelta : base + matchIndex;
const BYTE* const mStart = (matchIndex < prefixLowestIndex) ? dictLowest : prefixLowest;
while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; } /* catch up */
}
- offset_2 = offset_1; offset_1 = (U32)STORED_OFFSET(offcode);
+ offset_2 = offset_1; offset_1 = (U32)OFFBASE_TO_OFFSET(offBase);
}
/* store sequence */
_storeSequence:
{ size_t const litLength = (size_t)(start - anchor);
- ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offcode, matchLength);
+ ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offBase, matchLength);
anchor = ip = start + matchLength;
}
+ if (ms->lazySkipping) {
+ /* We've found a match, disable lazy skipping mode, and refill the hash cache. */
+ if (searchMethod == search_rowHash) {
+ ZSTD_row_fillHashCache(ms, base, rowLog, mls, ms->nextToUpdate, ilimit);
+ }
+ ms->lazySkipping = 0;
+ }
/* check immediate repcode */
if (isDxS) {
@@ -1682,12 +1741,12 @@ _storeSequence:
const BYTE* repMatch = repIndex < prefixLowestIndex ?
dictBase - dictIndexDelta + repIndex :
base + repIndex;
- if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex) >= 3 /* intentional overflow */)
+ if ( (ZSTD_index_overlap_check(prefixLowestIndex, repIndex))
&& (MEM_read32(repMatch) == MEM_read32(ip)) ) {
const BYTE* const repEnd2 = repIndex < prefixLowestIndex ? dictEnd : iend;
matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd2, prefixLowest) + 4;
- offcode = offset_2; offset_2 = offset_1; offset_1 = (U32)offcode; /* swap offset_2 <=> offset_1 */
- ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, matchLength);
+ offBase = offset_2; offset_2 = offset_1; offset_1 = (U32)offBase; /* swap offset_2 <=> offset_1 */
+ ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, matchLength);
ip += matchLength;
anchor = ip;
continue;
@@ -1701,168 +1760,183 @@ _storeSequence:
&& (MEM_read32(ip) == MEM_read32(ip - offset_2)) ) {
/* store sequence */
matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
- offcode = offset_2; offset_2 = offset_1; offset_1 = (U32)offcode; /* swap repcodes */
- ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, matchLength);
+ offBase = offset_2; offset_2 = offset_1; offset_1 = (U32)offBase; /* swap repcodes */
+ ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, matchLength);
ip += matchLength;
anchor = ip;
continue; /* faster when present ... (?) */
} } }
- /* Save reps for next block */
- rep[0] = offset_1 ? offset_1 : savedOffset;
- rep[1] = offset_2 ? offset_2 : savedOffset;
+ /* If offset_1 started invalid (offsetSaved1 != 0) and became valid (offset_1 != 0),
+ * rotate saved offsets. See comment in ZSTD_compressBlock_fast_noDict for more context. */
+ offsetSaved2 = ((offsetSaved1 != 0) && (offset_1 != 0)) ? offsetSaved1 : offsetSaved2;
+
+ /* save reps for next block */
+ rep[0] = offset_1 ? offset_1 : offsetSaved1;
+ rep[1] = offset_2 ? offset_2 : offsetSaved2;
/* Return the last literals size */
return (size_t)(iend - anchor);
}
+#endif /* build exclusions */
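
The hunks above replace the old STORE_*/STORED_* helpers with the OFFBASE_* naming (OFFSET_TO_OFFBASE, REPCODE1_TO_OFFBASE, OFFBASE_IS_OFFSET, OFFBASE_TO_OFFSET). As I read the definitions in lib/zstd/compress/zstd_compress_internal.h, real match offsets are stored as offset + ZSTD_REP_NUM while repcodes are stored as 1..ZSTD_REP_NUM; the sketch below uses simplified local copies of the macros purely for illustration (the real ones also carry asserts) and is not part of the patch:

#include <assert.h>
#include <stdio.h>

/* Simplified local copies, for illustration only; the real definitions live in
 * lib/zstd/compress/zstd_compress_internal.h and include additional asserts. */
#define ZSTD_REP_NUM          3
#define OFFSET_TO_OFFBASE(o)  ((o) + ZSTD_REP_NUM)
#define REPCODE1_TO_OFFBASE   1
#define OFFBASE_IS_OFFSET(o)  ((o) > ZSTD_REP_NUM)
#define OFFBASE_TO_OFFSET(o)  ((o) - ZSTD_REP_NUM)

int main(void)
{
    unsigned const offBaseMatch = OFFSET_TO_OFFBASE(100); /* a real match offset */
    unsigned const offBaseRep1  = REPCODE1_TO_OFFBASE;    /* repcode 1 */

    assert(OFFBASE_IS_OFFSET(offBaseMatch) && OFFBASE_TO_OFFSET(offBaseMatch) == 100);
    assert(!OFFBASE_IS_OFFSET(offBaseRep1));
    printf("offBase(offset=100)=%u, offBase(repcode 1)=%u\n",
           offBaseMatch, offBaseRep1);
    return 0;
}
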
-size_t ZSTD_compressBlock_btlazy2(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+#ifndef ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR
+size_t ZSTD_compressBlock_greedy(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_noDict);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_noDict);
}
-size_t ZSTD_compressBlock_lazy2(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_greedy_dictMatchState(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_noDict);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dictMatchState);
}
-size_t ZSTD_compressBlock_lazy(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_greedy_dedicatedDictSearch(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_noDict);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dedicatedDictSearch);
}
-size_t ZSTD_compressBlock_greedy(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_greedy_row(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_noDict);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_noDict);
}
-size_t ZSTD_compressBlock_btlazy2_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_greedy_dictMatchState_row(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_dictMatchState);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_dictMatchState);
}
-size_t ZSTD_compressBlock_lazy2_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dictMatchState);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_dedicatedDictSearch);
}
+#endif
-size_t ZSTD_compressBlock_lazy_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+#ifndef ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR
+size_t ZSTD_compressBlock_lazy(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dictMatchState);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_noDict);
}
-size_t ZSTD_compressBlock_greedy_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_lazy_dictMatchState(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dictMatchState);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dictMatchState);
}
-
-size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_lazy_dedicatedDictSearch(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dedicatedDictSearch);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dedicatedDictSearch);
}
-size_t ZSTD_compressBlock_lazy_dedicatedDictSearch(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_lazy_row(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dedicatedDictSearch);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_noDict);
}
-size_t ZSTD_compressBlock_greedy_dedicatedDictSearch(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_lazy_dictMatchState_row(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dedicatedDictSearch);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_dictMatchState);
}
-/* Row-based matchfinder */
-size_t ZSTD_compressBlock_lazy2_row(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_lazy_dedicatedDictSearch_row(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_noDict);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_dedicatedDictSearch);
}
+#endif
-size_t ZSTD_compressBlock_lazy_row(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+#ifndef ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR
+size_t ZSTD_compressBlock_lazy2(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_noDict);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_noDict);
}
-size_t ZSTD_compressBlock_greedy_row(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_lazy2_dictMatchState(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_noDict);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dictMatchState);
}
-size_t ZSTD_compressBlock_lazy2_dictMatchState_row(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_dictMatchState);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dedicatedDictSearch);
}
-size_t ZSTD_compressBlock_lazy_dictMatchState_row(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_lazy2_row(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_dictMatchState);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_noDict);
}
-size_t ZSTD_compressBlock_greedy_dictMatchState_row(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_lazy2_dictMatchState_row(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_dictMatchState);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_dictMatchState);
}
-
size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch_row(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_dedicatedDictSearch);
}
+#endif
-size_t ZSTD_compressBlock_lazy_dedicatedDictSearch_row(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+#ifndef ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR
+size_t ZSTD_compressBlock_btlazy2(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_dedicatedDictSearch);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_noDict);
}
-size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_btlazy2_dictMatchState(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_dedicatedDictSearch);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_dictMatchState);
}
+#endif
+#if !defined(ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR) \
+ || !defined(ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR) \
+ || !defined(ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR) \
+ || !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR)
FORCE_INLINE_TEMPLATE
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_compressBlock_lazy_extDict_generic(
- ZSTD_matchState_t* ms, seqStore_t* seqStore,
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore,
U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize,
const searchMethod_e searchMethod, const U32 depth)
@@ -1886,12 +1960,13 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
DEBUGLOG(5, "ZSTD_compressBlock_lazy_extDict_generic (searchFunc=%u)", (U32)searchMethod);
+ /* Reset the lazy skipping state */
+ ms->lazySkipping = 0;
+
/* init */
ip += (ip == prefixStart);
if (searchMethod == search_rowHash) {
- ZSTD_row_fillHashCache(ms, base, rowLog,
- MIN(ms->cParams.minMatch, 6 /* mls caps out at 6 */),
- ms->nextToUpdate, ilimit);
+ ZSTD_row_fillHashCache(ms, base, rowLog, mls, ms->nextToUpdate, ilimit);
}
/* Match Loop */
@@ -1903,7 +1978,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
#endif
while (ip < ilimit) {
size_t matchLength=0;
- size_t offcode=STORE_REPCODE_1;
+ size_t offBase = REPCODE1_TO_OFFBASE;
const BYTE* start=ip+1;
U32 curr = (U32)(ip-base);
@@ -1912,7 +1987,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
const U32 repIndex = (U32)(curr+1 - offset_1);
const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
const BYTE* const repMatch = repBase + repIndex;
- if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow */
+ if ( (ZSTD_index_overlap_check(dictLimit, repIndex))
& (offset_1 <= curr+1 - windowLow) ) /* note: we are searching at curr+1 */
if (MEM_read32(ip+1) == MEM_read32(repMatch)) {
/* repcode detected we should take it */
@@ -1922,14 +1997,23 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
} }
/* first search (depth 0) */
- { size_t offsetFound = 999999999;
- size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &offsetFound, mls, rowLog, searchMethod, ZSTD_extDict);
+ { size_t ofbCandidate = 999999999;
+ size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, ZSTD_extDict);
if (ml2 > matchLength)
- matchLength = ml2, start = ip, offcode=offsetFound;
+ matchLength = ml2, start = ip, offBase = ofbCandidate;
}
if (matchLength < 4) {
- ip += ((ip-anchor) >> kSearchStrength) + 1; /* jump faster over incompressible sections */
+ size_t const step = ((size_t)(ip-anchor) >> kSearchStrength);
+ ip += step + 1; /* jump faster over incompressible sections */
+ /* Enter the lazy skipping mode once we are skipping more than 8 bytes at a time.
+ * In this mode we stop inserting every position into our tables, and only insert
+ * positions that we search, which is one in step positions.
+ * The exact cutoff is flexible, I've just chosen a number that is reasonably high,
+ * so we minimize the compression ratio loss in "normal" scenarios. This mode gets
+ * triggered once we've gone 2KB without finding any matches.
+ */
+ ms->lazySkipping = step > kLazySkippingStep;
continue;
}
@@ -1939,30 +2023,30 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
ip ++;
curr++;
/* check repCode */
- if (offcode) {
+ if (offBase) {
const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog);
const U32 repIndex = (U32)(curr - offset_1);
const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
const BYTE* const repMatch = repBase + repIndex;
- if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow : do not test positions overlapping 2 memory segments */
+ if ( (ZSTD_index_overlap_check(dictLimit, repIndex))
& (offset_1 <= curr - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */
if (MEM_read32(ip) == MEM_read32(repMatch)) {
/* repcode detected */
const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
int const gain2 = (int)(repLength * 3);
- int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1);
+ int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offBase) + 1);
if ((repLength >= 4) && (gain2 > gain1))
- matchLength = repLength, offcode = STORE_REPCODE_1, start = ip;
+ matchLength = repLength, offBase = REPCODE1_TO_OFFBASE, start = ip;
} }
/* search match, depth 1 */
- { size_t offset2=999999999;
- size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &offset2, mls, rowLog, searchMethod, ZSTD_extDict);
- int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offset2))); /* raw approx */
- int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 4);
+ { size_t ofbCandidate = 999999999;
+ size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, ZSTD_extDict);
+ int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)ofbCandidate)); /* raw approx */
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 4);
if ((ml2 >= 4) && (gain2 > gain1)) {
- matchLength = ml2, offcode = offset2, start = ip;
+ matchLength = ml2, offBase = ofbCandidate, start = ip;
continue; /* search a better one */
} }
@@ -1971,50 +2055,57 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
ip ++;
curr++;
/* check repCode */
- if (offcode) {
+ if (offBase) {
const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog);
const U32 repIndex = (U32)(curr - offset_1);
const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
const BYTE* const repMatch = repBase + repIndex;
- if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow : do not test positions overlapping 2 memory segments */
+ if ( (ZSTD_index_overlap_check(dictLimit, repIndex))
& (offset_1 <= curr - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */
if (MEM_read32(ip) == MEM_read32(repMatch)) {
/* repcode detected */
const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
int const gain2 = (int)(repLength * 4);
- int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1);
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 1);
if ((repLength >= 4) && (gain2 > gain1))
- matchLength = repLength, offcode = STORE_REPCODE_1, start = ip;
+ matchLength = repLength, offBase = REPCODE1_TO_OFFBASE, start = ip;
} }
/* search match, depth 2 */
- { size_t offset2=999999999;
- size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &offset2, mls, rowLog, searchMethod, ZSTD_extDict);
- int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offset2))); /* raw approx */
- int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 7);
+ { size_t ofbCandidate = 999999999;
+ size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, ZSTD_extDict);
+ int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)ofbCandidate)); /* raw approx */
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 7);
if ((ml2 >= 4) && (gain2 > gain1)) {
- matchLength = ml2, offcode = offset2, start = ip;
+ matchLength = ml2, offBase = ofbCandidate, start = ip;
continue;
} } }
break; /* nothing found : store previous solution */
}
/* catch up */
- if (STORED_IS_OFFSET(offcode)) {
- U32 const matchIndex = (U32)((size_t)(start-base) - STORED_OFFSET(offcode));
+ if (OFFBASE_IS_OFFSET(offBase)) {
+ U32 const matchIndex = (U32)((size_t)(start-base) - OFFBASE_TO_OFFSET(offBase));
const BYTE* match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex;
const BYTE* const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart;
while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; } /* catch up */
- offset_2 = offset_1; offset_1 = (U32)STORED_OFFSET(offcode);
+ offset_2 = offset_1; offset_1 = (U32)OFFBASE_TO_OFFSET(offBase);
}
/* store sequence */
_storeSequence:
{ size_t const litLength = (size_t)(start - anchor);
- ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offcode, matchLength);
+ ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offBase, matchLength);
anchor = ip = start + matchLength;
}
+ if (ms->lazySkipping) {
+ /* We've found a match, disable lazy skipping mode, and refill the hash cache. */
+ if (searchMethod == search_rowHash) {
+ ZSTD_row_fillHashCache(ms, base, rowLog, mls, ms->nextToUpdate, ilimit);
+ }
+ ms->lazySkipping = 0;
+ }
/* check immediate repcode */
while (ip <= ilimit) {
@@ -2023,14 +2114,14 @@ _storeSequence:
const U32 repIndex = repCurrent - offset_2;
const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
const BYTE* const repMatch = repBase + repIndex;
- if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow : do not test positions overlapping 2 memory segments */
+ if ( (ZSTD_index_overlap_check(dictLimit, repIndex))
& (offset_2 <= repCurrent - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */
if (MEM_read32(ip) == MEM_read32(repMatch)) {
/* repcode detected we should take it */
const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
- offcode = offset_2; offset_2 = offset_1; offset_1 = (U32)offcode; /* swap offset history */
- ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, matchLength);
+ offBase = offset_2; offset_2 = offset_1; offset_1 = (U32)offBase; /* swap offset history */
+ ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, matchLength);
ip += matchLength;
anchor = ip;
continue; /* faster when present ... (?) */
@@ -2045,58 +2136,65 @@ _storeSequence:
/* Return the last literals size */
return (size_t)(iend - anchor);
}
+#endif /* build exclusions */
-
+#ifndef ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR
size_t ZSTD_compressBlock_greedy_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0);
}
-size_t ZSTD_compressBlock_lazy_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_greedy_extDict_row(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
-
{
- return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1);
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0);
}
+#endif
-size_t ZSTD_compressBlock_lazy2_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+#ifndef ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR
+size_t ZSTD_compressBlock_lazy_extDict(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2);
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1);
}
-size_t ZSTD_compressBlock_btlazy2_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_lazy_extDict_row(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2);
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1);
}
+#endif
-size_t ZSTD_compressBlock_greedy_extDict_row(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+#ifndef ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR
+size_t ZSTD_compressBlock_lazy2_extDict(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
+
{
- return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0);
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2);
}
-size_t ZSTD_compressBlock_lazy_extDict_row(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_lazy2_extDict_row(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
-
{
- return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1);
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2);
}
+#endif
-size_t ZSTD_compressBlock_lazy2_extDict_row(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+#ifndef ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR
+size_t ZSTD_compressBlock_btlazy2_extDict(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2);
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2);
}
+#endif
diff --git a/lib/zstd/compress/zstd_lazy.h b/lib/zstd/compress/zstd_lazy.h
index e5bdf4df8dde..987a036d8bde 100644
--- a/lib/zstd/compress/zstd_lazy.h
+++ b/lib/zstd/compress/zstd_lazy.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -11,7 +12,6 @@
#ifndef ZSTD_LAZY_H
#define ZSTD_LAZY_H
-
#include "zstd_compress_internal.h"
/*
@@ -22,98 +22,173 @@
*/
#define ZSTD_LAZY_DDSS_BUCKET_LOG 2
-U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip);
-void ZSTD_row_update(ZSTD_matchState_t* const ms, const BYTE* ip);
+#define ZSTD_ROW_HASH_TAG_BITS 8 /* nb bits to use for the tag */
+
+#if !defined(ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR) \
+ || !defined(ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR) \
+ || !defined(ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR) \
+ || !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR)
+U32 ZSTD_insertAndFindFirstIndex(ZSTD_MatchState_t* ms, const BYTE* ip);
+void ZSTD_row_update(ZSTD_MatchState_t* const ms, const BYTE* ip);
-void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const BYTE* const ip);
+void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_MatchState_t* ms, const BYTE* const ip);
void ZSTD_preserveUnsortedMark (U32* const table, U32 const size, U32 const reducerValue); /*! used in ZSTD_reduceIndex(). preemptively increase value of ZSTD_DUBT_UNSORTED_MARK */
+#endif
-size_t ZSTD_compressBlock_btlazy2(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+#ifndef ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR
+size_t ZSTD_compressBlock_greedy(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_lazy2(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_greedy_row(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_lazy(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_greedy_dictMatchState(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_greedy(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_greedy_dictMatchState_row(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_lazy2_row(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_greedy_dedicatedDictSearch(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_lazy_row(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_greedy_row(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_greedy_extDict(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_greedy_extDict_row(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_btlazy2_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+#define ZSTD_COMPRESSBLOCK_GREEDY ZSTD_compressBlock_greedy
+#define ZSTD_COMPRESSBLOCK_GREEDY_ROW ZSTD_compressBlock_greedy_row
+#define ZSTD_COMPRESSBLOCK_GREEDY_DICTMATCHSTATE ZSTD_compressBlock_greedy_dictMatchState
+#define ZSTD_COMPRESSBLOCK_GREEDY_DICTMATCHSTATE_ROW ZSTD_compressBlock_greedy_dictMatchState_row
+#define ZSTD_COMPRESSBLOCK_GREEDY_DEDICATEDDICTSEARCH ZSTD_compressBlock_greedy_dedicatedDictSearch
+#define ZSTD_COMPRESSBLOCK_GREEDY_DEDICATEDDICTSEARCH_ROW ZSTD_compressBlock_greedy_dedicatedDictSearch_row
+#define ZSTD_COMPRESSBLOCK_GREEDY_EXTDICT ZSTD_compressBlock_greedy_extDict
+#define ZSTD_COMPRESSBLOCK_GREEDY_EXTDICT_ROW ZSTD_compressBlock_greedy_extDict_row
+#else
+#define ZSTD_COMPRESSBLOCK_GREEDY NULL
+#define ZSTD_COMPRESSBLOCK_GREEDY_ROW NULL
+#define ZSTD_COMPRESSBLOCK_GREEDY_DICTMATCHSTATE NULL
+#define ZSTD_COMPRESSBLOCK_GREEDY_DICTMATCHSTATE_ROW NULL
+#define ZSTD_COMPRESSBLOCK_GREEDY_DEDICATEDDICTSEARCH NULL
+#define ZSTD_COMPRESSBLOCK_GREEDY_DEDICATEDDICTSEARCH_ROW NULL
+#define ZSTD_COMPRESSBLOCK_GREEDY_EXTDICT NULL
+#define ZSTD_COMPRESSBLOCK_GREEDY_EXTDICT_ROW NULL
+#endif
+
+#ifndef ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR
+size_t ZSTD_compressBlock_lazy(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_lazy2_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_lazy_row(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_greedy_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_lazy2_dictMatchState_row(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy_dictMatchState_row(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_greedy_dictMatchState_row(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_lazy_dedicatedDictSearch(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-
-size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_lazy_dedicatedDictSearch_row(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_lazy_dedicatedDictSearch(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_lazy_extDict(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_greedy_dedicatedDictSearch(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_lazy_extDict_row(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch_row(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+
+#define ZSTD_COMPRESSBLOCK_LAZY ZSTD_compressBlock_lazy
+#define ZSTD_COMPRESSBLOCK_LAZY_ROW ZSTD_compressBlock_lazy_row
+#define ZSTD_COMPRESSBLOCK_LAZY_DICTMATCHSTATE ZSTD_compressBlock_lazy_dictMatchState
+#define ZSTD_COMPRESSBLOCK_LAZY_DICTMATCHSTATE_ROW ZSTD_compressBlock_lazy_dictMatchState_row
+#define ZSTD_COMPRESSBLOCK_LAZY_DEDICATEDDICTSEARCH ZSTD_compressBlock_lazy_dedicatedDictSearch
+#define ZSTD_COMPRESSBLOCK_LAZY_DEDICATEDDICTSEARCH_ROW ZSTD_compressBlock_lazy_dedicatedDictSearch_row
+#define ZSTD_COMPRESSBLOCK_LAZY_EXTDICT ZSTD_compressBlock_lazy_extDict
+#define ZSTD_COMPRESSBLOCK_LAZY_EXTDICT_ROW ZSTD_compressBlock_lazy_extDict_row
+#else
+#define ZSTD_COMPRESSBLOCK_LAZY NULL
+#define ZSTD_COMPRESSBLOCK_LAZY_ROW NULL
+#define ZSTD_COMPRESSBLOCK_LAZY_DICTMATCHSTATE NULL
+#define ZSTD_COMPRESSBLOCK_LAZY_DICTMATCHSTATE_ROW NULL
+#define ZSTD_COMPRESSBLOCK_LAZY_DEDICATEDDICTSEARCH NULL
+#define ZSTD_COMPRESSBLOCK_LAZY_DEDICATEDDICTSEARCH_ROW NULL
+#define ZSTD_COMPRESSBLOCK_LAZY_EXTDICT NULL
+#define ZSTD_COMPRESSBLOCK_LAZY_EXTDICT_ROW NULL
+#endif
+
+#ifndef ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR
+size_t ZSTD_compressBlock_lazy2(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_lazy_dedicatedDictSearch_row(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_lazy2_row(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_lazy2_dictMatchState(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-
-size_t ZSTD_compressBlock_greedy_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_lazy2_dictMatchState_row(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_lazy_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch_row(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy2_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_greedy_extDict_row(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_lazy2_extDict_row(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_lazy_extDict_row(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+
+#define ZSTD_COMPRESSBLOCK_LAZY2 ZSTD_compressBlock_lazy2
+#define ZSTD_COMPRESSBLOCK_LAZY2_ROW ZSTD_compressBlock_lazy2_row
+#define ZSTD_COMPRESSBLOCK_LAZY2_DICTMATCHSTATE ZSTD_compressBlock_lazy2_dictMatchState
+#define ZSTD_COMPRESSBLOCK_LAZY2_DICTMATCHSTATE_ROW ZSTD_compressBlock_lazy2_dictMatchState_row
+#define ZSTD_COMPRESSBLOCK_LAZY2_DEDICATEDDICTSEARCH ZSTD_compressBlock_lazy2_dedicatedDictSearch
+#define ZSTD_COMPRESSBLOCK_LAZY2_DEDICATEDDICTSEARCH_ROW ZSTD_compressBlock_lazy2_dedicatedDictSearch_row
+#define ZSTD_COMPRESSBLOCK_LAZY2_EXTDICT ZSTD_compressBlock_lazy2_extDict
+#define ZSTD_COMPRESSBLOCK_LAZY2_EXTDICT_ROW ZSTD_compressBlock_lazy2_extDict_row
+#else
+#define ZSTD_COMPRESSBLOCK_LAZY2 NULL
+#define ZSTD_COMPRESSBLOCK_LAZY2_ROW NULL
+#define ZSTD_COMPRESSBLOCK_LAZY2_DICTMATCHSTATE NULL
+#define ZSTD_COMPRESSBLOCK_LAZY2_DICTMATCHSTATE_ROW NULL
+#define ZSTD_COMPRESSBLOCK_LAZY2_DEDICATEDDICTSEARCH NULL
+#define ZSTD_COMPRESSBLOCK_LAZY2_DEDICATEDDICTSEARCH_ROW NULL
+#define ZSTD_COMPRESSBLOCK_LAZY2_EXTDICT NULL
+#define ZSTD_COMPRESSBLOCK_LAZY2_EXTDICT_ROW NULL
+#endif
+
+#ifndef ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR
+size_t ZSTD_compressBlock_btlazy2(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_lazy2_extDict_row(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_btlazy2_dictMatchState(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_btlazy2_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-
+#define ZSTD_COMPRESSBLOCK_BTLAZY2 ZSTD_compressBlock_btlazy2
+#define ZSTD_COMPRESSBLOCK_BTLAZY2_DICTMATCHSTATE ZSTD_compressBlock_btlazy2_dictMatchState
+#define ZSTD_COMPRESSBLOCK_BTLAZY2_EXTDICT ZSTD_compressBlock_btlazy2_extDict
+#else
+#define ZSTD_COMPRESSBLOCK_BTLAZY2 NULL
+#define ZSTD_COMPRESSBLOCK_BTLAZY2_DICTMATCHSTATE NULL
+#define ZSTD_COMPRESSBLOCK_BTLAZY2_EXTDICT NULL
+#endif
#endif /* ZSTD_LAZY_H */
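
Editor's note: the ZSTD_COMPRESSBLOCK_* fallback macros above let strategy-selection tables elsewhere in the compressor be built without per-entry #ifdefs; an excluded build simply leaves a NULL slot. A minimal sketch of the pattern follows, using hypothetical demo names rather than the real zstd selector:

    #include <stddef.h>

    typedef size_t (*block_compressor_f)(const void *src, size_t srcSize);

    #ifndef DEMO_EXCLUDE_LAZY
    static size_t demo_compress_lazy(const void *src, size_t srcSize)
    { (void)src; return srcSize; }
    #define DEMO_COMPRESS_LAZY demo_compress_lazy
    #else
    #define DEMO_COMPRESS_LAZY NULL   /* excluded build: slot stays NULL */
    #endif

    static block_compressor_f demo_select(void)
    {
        static const block_compressor_f table[1] = { DEMO_COMPRESS_LAZY };
        return table[0];   /* callers must tolerate a NULL slot in excluded builds */
    }
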
diff --git a/lib/zstd/compress/zstd_ldm.c b/lib/zstd/compress/zstd_ldm.c
index dd86fc83e7dd..54eefad9cae6 100644
--- a/lib/zstd/compress/zstd_ldm.c
+++ b/lib/zstd/compress/zstd_ldm.c
@@ -1,5 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -16,7 +17,7 @@
#include "zstd_double_fast.h" /* ZSTD_fillDoubleHashTable() */
#include "zstd_ldm_geartab.h"
-#define LDM_BUCKET_SIZE_LOG 3
+#define LDM_BUCKET_SIZE_LOG 4
#define LDM_MIN_MATCH_LENGTH 64
#define LDM_HASH_RLOG 7
@@ -133,21 +134,35 @@ done:
}
void ZSTD_ldm_adjustParameters(ldmParams_t* params,
- ZSTD_compressionParameters const* cParams)
+ const ZSTD_compressionParameters* cParams)
{
params->windowLog = cParams->windowLog;
ZSTD_STATIC_ASSERT(LDM_BUCKET_SIZE_LOG <= ZSTD_LDM_BUCKETSIZELOG_MAX);
DEBUGLOG(4, "ZSTD_ldm_adjustParameters");
- if (!params->bucketSizeLog) params->bucketSizeLog = LDM_BUCKET_SIZE_LOG;
- if (!params->minMatchLength) params->minMatchLength = LDM_MIN_MATCH_LENGTH;
+ if (params->hashRateLog == 0) {
+ if (params->hashLog > 0) {
+ /* if params->hashLog is set, derive hashRateLog from it */
+ assert(params->hashLog <= ZSTD_HASHLOG_MAX);
+ if (params->windowLog > params->hashLog) {
+ params->hashRateLog = params->windowLog - params->hashLog;
+ }
+ } else {
+ assert(1 <= (int)cParams->strategy && (int)cParams->strategy <= 9);
+ /* mapping from [fast, rate7] to [btultra2, rate4] */
+ params->hashRateLog = 7 - (cParams->strategy/3);
+ }
+ }
if (params->hashLog == 0) {
- params->hashLog = MAX(ZSTD_HASHLOG_MIN, params->windowLog - LDM_HASH_RLOG);
- assert(params->hashLog <= ZSTD_HASHLOG_MAX);
+ params->hashLog = BOUNDED(ZSTD_HASHLOG_MIN, params->windowLog - params->hashRateLog, ZSTD_HASHLOG_MAX);
}
- if (params->hashRateLog == 0) {
- params->hashRateLog = params->windowLog < params->hashLog
- ? 0
- : params->windowLog - params->hashLog;
+ if (params->minMatchLength == 0) {
+ params->minMatchLength = LDM_MIN_MATCH_LENGTH;
+ if (cParams->strategy >= ZSTD_btultra)
+ params->minMatchLength /= 2;
+ }
+ if (params->bucketSizeLog==0) {
+ assert(1 <= (int)cParams->strategy && (int)cParams->strategy <= 9);
+ params->bucketSizeLog = BOUNDED(LDM_BUCKET_SIZE_LOG, (U32)cParams->strategy, ZSTD_LDM_BUCKETSIZELOG_MAX);
}
params->bucketSizeLog = MIN(params->bucketSizeLog, params->hashLog);
}
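
Editor's note: ZSTD_ldm_adjustParameters() now derives hashRateLog from the strategy and clamps hashLog and bucketSizeLog with BOUNDED(). A sketch of that clamp, assuming zstd's usual MIN/MAX-based definition:

    #define DEMO_MIN(a, b)          ((a) < (b) ? (a) : (b))
    #define DEMO_MAX(a, b)          ((a) > (b) ? (a) : (b))
    #define DEMO_BOUNDED(lo, v, hi) DEMO_MAX((lo), DEMO_MIN((v), (hi)))
    /* e.g. with windowLog=27 and a derived hashRateLog=4, hashLog becomes
     * DEMO_BOUNDED(ZSTD_HASHLOG_MIN, 27 - 4, ZSTD_HASHLOG_MAX) == 23,
     * assuming the usual 6..30 hash-log bounds. */
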
@@ -170,22 +185,22 @@ size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize)
/* ZSTD_ldm_getBucket() :
* Returns a pointer to the start of the bucket associated with hash. */
static ldmEntry_t* ZSTD_ldm_getBucket(
- ldmState_t* ldmState, size_t hash, ldmParams_t const ldmParams)
+ const ldmState_t* ldmState, size_t hash, U32 const bucketSizeLog)
{
- return ldmState->hashTable + (hash << ldmParams.bucketSizeLog);
+ return ldmState->hashTable + (hash << bucketSizeLog);
}
/* ZSTD_ldm_insertEntry() :
* Insert the entry with corresponding hash into the hash table */
static void ZSTD_ldm_insertEntry(ldmState_t* ldmState,
size_t const hash, const ldmEntry_t entry,
- ldmParams_t const ldmParams)
+ U32 const bucketSizeLog)
{
BYTE* const pOffset = ldmState->bucketOffsets + hash;
unsigned const offset = *pOffset;
- *(ZSTD_ldm_getBucket(ldmState, hash, ldmParams) + offset) = entry;
- *pOffset = (BYTE)((offset + 1) & ((1u << ldmParams.bucketSizeLog) - 1));
+ *(ZSTD_ldm_getBucket(ldmState, hash, bucketSizeLog) + offset) = entry;
+ *pOffset = (BYTE)((offset + 1) & ((1u << bucketSizeLog) - 1));
}
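
Editor's note: the hunk above passes bucketSizeLog directly instead of the whole ldmParams_t. Each hash owns 2^bucketSizeLog consecutive entries, and bucketOffsets[hash] is a rolling write cursor that wraps with a mask. A standalone sketch of the same addressing (illustrative types, not the zstd ones):

    #include <stddef.h>
    #include <stdint.h>

    typedef struct { uint32_t offset, checksum; } demo_entry_t;

    static void demo_insert(demo_entry_t *table, uint8_t *cursors,
                            size_t hash, demo_entry_t e, uint32_t bucketSizeLog)
    {
        demo_entry_t *bucket = table + (hash << bucketSizeLog);  /* start of bucket */
        uint8_t const pos = cursors[hash];
        bucket[pos] = e;                                         /* overwrite oldest slot */
        cursors[hash] = (uint8_t)((pos + 1) & ((1u << bucketSizeLog) - 1));
    }
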
@@ -234,7 +249,7 @@ static size_t ZSTD_ldm_countBackwardsMatch_2segments(
*
* The tables for the other strategies are filled within their
* block compressors. */
-static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms,
+static size_t ZSTD_ldm_fillFastTables(ZSTD_MatchState_t* ms,
void const* end)
{
const BYTE* const iend = (const BYTE*)end;
@@ -242,11 +257,15 @@ static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms,
switch(ms->cParams.strategy)
{
case ZSTD_fast:
- ZSTD_fillHashTable(ms, iend, ZSTD_dtlm_fast);
+ ZSTD_fillHashTable(ms, iend, ZSTD_dtlm_fast, ZSTD_tfp_forCCtx);
break;
case ZSTD_dfast:
- ZSTD_fillDoubleHashTable(ms, iend, ZSTD_dtlm_fast);
+#ifndef ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR
+ ZSTD_fillDoubleHashTable(ms, iend, ZSTD_dtlm_fast, ZSTD_tfp_forCCtx);
+#else
+ assert(0); /* shouldn't be called: cparams should've been adjusted. */
+#endif
break;
case ZSTD_greedy:
@@ -269,7 +288,8 @@ void ZSTD_ldm_fillHashTable(
const BYTE* iend, ldmParams_t const* params)
{
U32 const minMatchLength = params->minMatchLength;
- U32 const hBits = params->hashLog - params->bucketSizeLog;
+ U32 const bucketSizeLog = params->bucketSizeLog;
+ U32 const hBits = params->hashLog - bucketSizeLog;
BYTE const* const base = ldmState->window.base;
BYTE const* const istart = ip;
ldmRollingHashState_t hashState;
@@ -284,7 +304,7 @@ void ZSTD_ldm_fillHashTable(
unsigned n;
numSplits = 0;
- hashed = ZSTD_ldm_gear_feed(&hashState, ip, iend - ip, splits, &numSplits);
+ hashed = ZSTD_ldm_gear_feed(&hashState, ip, (size_t)(iend - ip), splits, &numSplits);
for (n = 0; n < numSplits; n++) {
if (ip + splits[n] >= istart + minMatchLength) {
@@ -295,7 +315,7 @@ void ZSTD_ldm_fillHashTable(
entry.offset = (U32)(split - base);
entry.checksum = (U32)(xxhash >> 32);
- ZSTD_ldm_insertEntry(ldmState, hash, entry, *params);
+ ZSTD_ldm_insertEntry(ldmState, hash, entry, params->bucketSizeLog);
}
}
@@ -309,7 +329,7 @@ void ZSTD_ldm_fillHashTable(
* Sets cctx->nextToUpdate to a position corresponding closer to anchor
 * if it is far away

* (after a long match, only update tables a limited amount). */
-static void ZSTD_ldm_limitTableUpdate(ZSTD_matchState_t* ms, const BYTE* anchor)
+static void ZSTD_ldm_limitTableUpdate(ZSTD_MatchState_t* ms, const BYTE* anchor)
{
U32 const curr = (U32)(anchor - ms->window.base);
if (curr > ms->nextToUpdate + 1024) {
@@ -318,8 +338,10 @@ static void ZSTD_ldm_limitTableUpdate(ZSTD_matchState_t* ms, const BYTE* anchor)
}
}
-static size_t ZSTD_ldm_generateSequences_internal(
- ldmState_t* ldmState, rawSeqStore_t* rawSeqStore,
+static
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+size_t ZSTD_ldm_generateSequences_internal(
+ ldmState_t* ldmState, RawSeqStore_t* rawSeqStore,
ldmParams_t const* params, void const* src, size_t srcSize)
{
/* LDM parameters */
@@ -373,7 +395,7 @@ static size_t ZSTD_ldm_generateSequences_internal(
candidates[n].split = split;
candidates[n].hash = hash;
candidates[n].checksum = (U32)(xxhash >> 32);
- candidates[n].bucket = ZSTD_ldm_getBucket(ldmState, hash, *params);
+ candidates[n].bucket = ZSTD_ldm_getBucket(ldmState, hash, params->bucketSizeLog);
PREFETCH_L1(candidates[n].bucket);
}
@@ -396,7 +418,7 @@ static size_t ZSTD_ldm_generateSequences_internal(
* the previous one, we merely register it in the hash table and
* move on */
if (split < anchor) {
- ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
+ ZSTD_ldm_insertEntry(ldmState, hash, newEntry, params->bucketSizeLog);
continue;
}
@@ -443,7 +465,7 @@ static size_t ZSTD_ldm_generateSequences_internal(
/* No match found -- insert an entry into the hash table
* and process the next candidate match */
if (bestEntry == NULL) {
- ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
+ ZSTD_ldm_insertEntry(ldmState, hash, newEntry, params->bucketSizeLog);
continue;
}
@@ -464,7 +486,7 @@ static size_t ZSTD_ldm_generateSequences_internal(
/* Insert the current entry into the hash table --- it must be
* done after the previous block to avoid clobbering bestEntry */
- ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
+ ZSTD_ldm_insertEntry(ldmState, hash, newEntry, params->bucketSizeLog);
anchor = split + forwardMatchLength;
@@ -503,7 +525,7 @@ static void ZSTD_ldm_reduceTable(ldmEntry_t* const table, U32 const size,
}
size_t ZSTD_ldm_generateSequences(
- ldmState_t* ldmState, rawSeqStore_t* sequences,
+ ldmState_t* ldmState, RawSeqStore_t* sequences,
ldmParams_t const* params, void const* src, size_t srcSize)
{
U32 const maxDist = 1U << params->windowLog;
@@ -549,7 +571,7 @@ size_t ZSTD_ldm_generateSequences(
* the window through early invalidation.
* TODO: * Test the chunk size.
* * Try invalidation after the sequence generation and test the
- * the offset against maxDist directly.
+ * offset against maxDist directly.
*
* NOTE: Because of dictionaries + sequence splitting we MUST make sure
* that any offset used is valid at the END of the sequence, since it may
@@ -580,7 +602,7 @@ size_t ZSTD_ldm_generateSequences(
}
void
-ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch)
+ZSTD_ldm_skipSequences(RawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch)
{
while (srcSize > 0 && rawSeqStore->pos < rawSeqStore->size) {
rawSeq* seq = rawSeqStore->seq + rawSeqStore->pos;
@@ -616,7 +638,7 @@ ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, U32 const min
* Returns the current sequence to handle, or if the rest of the block should
* be literals, it returns a sequence with offset == 0.
*/
-static rawSeq maybeSplitSequence(rawSeqStore_t* rawSeqStore,
+static rawSeq maybeSplitSequence(RawSeqStore_t* rawSeqStore,
U32 const remaining, U32 const minMatch)
{
rawSeq sequence = rawSeqStore->seq[rawSeqStore->pos];
@@ -640,7 +662,7 @@ static rawSeq maybeSplitSequence(rawSeqStore_t* rawSeqStore,
return sequence;
}
-void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) {
+void ZSTD_ldm_skipRawSeqStoreBytes(RawSeqStore_t* rawSeqStore, size_t nbBytes) {
U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes);
while (currPos && rawSeqStore->pos < rawSeqStore->size) {
rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos];
@@ -657,14 +679,14 @@ void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) {
}
}
-size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- ZSTD_paramSwitch_e useRowMatchFinder,
+size_t ZSTD_ldm_blockCompress(RawSeqStore_t* rawSeqStore,
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_ParamSwitch_e useRowMatchFinder,
void const* src, size_t srcSize)
{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
unsigned const minMatch = cParams->minMatch;
- ZSTD_blockCompressor const blockCompressor =
+ ZSTD_BlockCompressor_f const blockCompressor =
ZSTD_selectBlockCompressor(cParams->strategy, useRowMatchFinder, ZSTD_matchState_dictMode(ms));
/* Input bounds */
BYTE const* const istart = (BYTE const*)src;
@@ -689,7 +711,6 @@ size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
/* maybeSplitSequence updates rawSeqStore->pos */
rawSeq const sequence = maybeSplitSequence(rawSeqStore,
(U32)(iend - ip), minMatch);
- int i;
/* End signal */
if (sequence.offset == 0)
break;
@@ -702,6 +723,7 @@ size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
/* Run the block compressor */
DEBUGLOG(5, "pos %u : calling block compressor on segment of size %u", (unsigned)(ip-istart), sequence.litLength);
{
+ int i;
size_t const newLitLength =
blockCompressor(ms, seqStore, rep, ip, sequence.litLength);
ip += sequence.litLength;
@@ -711,7 +733,7 @@ size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
rep[0] = sequence.offset;
/* Store the sequence */
ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength, iend,
- STORE_OFFSET(sequence.offset),
+ OFFSET_TO_OFFBASE(sequence.offset),
sequence.matchLength);
ip += sequence.matchLength;
}
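
Editor's note: the STORE_OFFSET -> OFFSET_TO_OFFBASE rename above refers to the "offBase" sumtype used by ZSTD_storeSeq(). As I understand the upstream convention (treat as illustrative), values 1..ZSTD_REP_NUM denote repcodes and larger values encode a real offset shifted by ZSTD_REP_NUM:

    #define DEMO_REP_NUM               3
    #define DEMO_OFFSET_TO_OFFBASE(o)  ((o) + DEMO_REP_NUM)   /* real offset */
    #define DEMO_REPCODE_TO_OFFBASE(r) (r)                    /* r in 1..3 */
    #define DEMO_IS_REPCODE(ob)        ((ob) <= DEMO_REP_NUM)
    #define DEMO_OFFBASE_TO_OFFSET(ob) ((ob) - DEMO_REP_NUM)
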
diff --git a/lib/zstd/compress/zstd_ldm.h b/lib/zstd/compress/zstd_ldm.h
index fbc6a5e88fd7..41400a7191b2 100644
--- a/lib/zstd/compress/zstd_ldm.h
+++ b/lib/zstd/compress/zstd_ldm.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -11,7 +12,6 @@
#ifndef ZSTD_LDM_H
#define ZSTD_LDM_H
-
#include "zstd_compress_internal.h" /* ldmParams_t, U32 */
#include <linux/zstd.h> /* ZSTD_CCtx, size_t */
@@ -40,7 +40,7 @@ void ZSTD_ldm_fillHashTable(
* sequences.
*/
size_t ZSTD_ldm_generateSequences(
- ldmState_t* ldms, rawSeqStore_t* sequences,
+ ldmState_t* ldms, RawSeqStore_t* sequences,
ldmParams_t const* params, void const* src, size_t srcSize);
/*
@@ -61,9 +61,9 @@ size_t ZSTD_ldm_generateSequences(
* two. We handle that case correctly, and update `rawSeqStore` appropriately.
* NOTE: This function does not return any errors.
*/
-size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- ZSTD_paramSwitch_e useRowMatchFinder,
+size_t ZSTD_ldm_blockCompress(RawSeqStore_t* rawSeqStore,
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_ParamSwitch_e useRowMatchFinder,
void const* src, size_t srcSize);
/*
@@ -73,7 +73,7 @@ size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
* Avoids emitting matches less than `minMatch` bytes.
* Must be called for data that is not passed to ZSTD_ldm_blockCompress().
*/
-void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize,
+void ZSTD_ldm_skipSequences(RawSeqStore_t* rawSeqStore, size_t srcSize,
U32 const minMatch);
/* ZSTD_ldm_skipRawSeqStoreBytes():
@@ -81,7 +81,7 @@ void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize,
* Not to be used in conjunction with ZSTD_ldm_skipSequences().
 * Must be called for data which is not passed to ZSTD_ldm_blockCompress().
*/
-void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes);
+void ZSTD_ldm_skipRawSeqStoreBytes(RawSeqStore_t* rawSeqStore, size_t nbBytes);
/* ZSTD_ldm_getTableSize() :
* Estimate the space needed for long distance matching tables or 0 if LDM is
@@ -107,5 +107,4 @@ size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize);
void ZSTD_ldm_adjustParameters(ldmParams_t* params,
ZSTD_compressionParameters const* cParams);
-
#endif /* ZSTD_LDM_H */
diff --git a/lib/zstd/compress/zstd_ldm_geartab.h b/lib/zstd/compress/zstd_ldm_geartab.h
index 647f865be290..cfccfc46f6f7 100644
--- a/lib/zstd/compress/zstd_ldm_geartab.h
+++ b/lib/zstd/compress/zstd_ldm_geartab.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
diff --git a/lib/zstd/compress/zstd_opt.c b/lib/zstd/compress/zstd_opt.c
index fd82acfda62f..b62fd1b0d83e 100644
--- a/lib/zstd/compress/zstd_opt.c
+++ b/lib/zstd/compress/zstd_opt.c
@@ -1,5 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
- * Copyright (c) Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -12,11 +13,14 @@
#include "hist.h"
#include "zstd_opt.h"
+#if !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR) \
+ || !defined(ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR) \
+ || !defined(ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR)
#define ZSTD_LITFREQ_ADD 2 /* scaling factor for litFreq, so that frequencies adapt faster to new stats */
#define ZSTD_MAX_PRICE (1<<30)
-#define ZSTD_PREDEF_THRESHOLD 1024 /* if srcSize < ZSTD_PREDEF_THRESHOLD, symbols' cost is assumed static, directly determined by pre-defined distributions */
+#define ZSTD_PREDEF_THRESHOLD 8 /* if srcSize < ZSTD_PREDEF_THRESHOLD, symbols' cost is assumed static, directly determined by pre-defined distributions */
/*-*************************************
@@ -26,27 +30,35 @@
#if 0 /* approximation at bit level (for tests) */
# define BITCOST_ACCURACY 0
# define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
-# define WEIGHT(stat, opt) ((void)opt, ZSTD_bitWeight(stat))
+# define WEIGHT(stat, opt) ((void)(opt), ZSTD_bitWeight(stat))
#elif 0 /* fractional bit accuracy (for tests) */
# define BITCOST_ACCURACY 8
# define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
-# define WEIGHT(stat,opt) ((void)opt, ZSTD_fracWeight(stat))
+# define WEIGHT(stat,opt) ((void)(opt), ZSTD_fracWeight(stat))
#else /* opt==approx, ultra==accurate */
# define BITCOST_ACCURACY 8
# define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
-# define WEIGHT(stat,opt) (opt ? ZSTD_fracWeight(stat) : ZSTD_bitWeight(stat))
+# define WEIGHT(stat,opt) ((opt) ? ZSTD_fracWeight(stat) : ZSTD_bitWeight(stat))
#endif
+/* ZSTD_bitWeight() :
+ * provide estimated "cost" of a stat in full bits only */
MEM_STATIC U32 ZSTD_bitWeight(U32 stat)
{
return (ZSTD_highbit32(stat+1) * BITCOST_MULTIPLIER);
}
+/* ZSTD_fracWeight() :
+ * provide fractional-bit "cost" of a stat,
+ * using linear interpolation approximation */
MEM_STATIC U32 ZSTD_fracWeight(U32 rawStat)
{
U32 const stat = rawStat + 1;
U32 const hb = ZSTD_highbit32(stat);
U32 const BWeight = hb * BITCOST_MULTIPLIER;
+ /* Fweight was meant for "Fractional weight"
+ * but it's effectively a value between 1 and 2
+ * using fixed point arithmetic */
U32 const FWeight = (stat << BITCOST_ACCURACY) >> hb;
U32 const weight = BWeight + FWeight;
assert(hb + BITCOST_ACCURACY < 31);
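
Editor's note: a worked instance of the fixed-point interpolation in ZSTD_fracWeight(), recomputed standalone (portable highbit helper, not zstd's ZSTD_highbit32):

    #include <stdio.h>

    static unsigned demo_highbit32(unsigned v)
    { unsigned r = 0; while (v >>= 1) r++; return r; }

    int main(void)
    {
        unsigned const ACCURACY = 8;                        /* BITCOST_ACCURACY */
        unsigned const rawStat  = 5;
        unsigned const stat     = rawStat + 1;              /* 6 */
        unsigned const hb       = demo_highbit32(stat);     /* 2 */
        unsigned const BWeight  = hb << ACCURACY;           /* 512 */
        unsigned const FWeight  = (stat << ACCURACY) >> hb; /* 384, mantissa 1.5 */
        printf("weight = %u (%.2f bits)\n",
               BWeight + FWeight, (BWeight + FWeight) / 256.0);
        return 0;                                           /* prints 896 (3.50 bits) */
    }
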
@@ -57,7 +69,7 @@ MEM_STATIC U32 ZSTD_fracWeight(U32 rawStat)
/* debugging function,
* @return price in bytes as fractional value
* for debug messages only */
-MEM_STATIC double ZSTD_fCost(U32 price)
+MEM_STATIC double ZSTD_fCost(int price)
{
return (double)price / (BITCOST_MULTIPLIER*8);
}
@@ -88,20 +100,26 @@ static U32 sum_u32(const unsigned table[], size_t nbElts)
return total;
}
-static U32 ZSTD_downscaleStats(unsigned* table, U32 lastEltIndex, U32 shift)
+typedef enum { base_0possible=0, base_1guaranteed=1 } base_directive_e;
+
+static U32
+ZSTD_downscaleStats(unsigned* table, U32 lastEltIndex, U32 shift, base_directive_e base1)
{
U32 s, sum=0;
- DEBUGLOG(5, "ZSTD_downscaleStats (nbElts=%u, shift=%u)", (unsigned)lastEltIndex+1, (unsigned)shift);
+ DEBUGLOG(5, "ZSTD_downscaleStats (nbElts=%u, shift=%u)",
+ (unsigned)lastEltIndex+1, (unsigned)shift );
assert(shift < 30);
for (s=0; s<lastEltIndex+1; s++) {
- table[s] = 1 + (table[s] >> shift);
- sum += table[s];
+ unsigned const base = base1 ? 1 : (table[s]>0);
+ unsigned const newStat = base + (table[s] >> shift);
+ sum += newStat;
+ table[s] = newStat;
}
return sum;
}
/* ZSTD_scaleStats() :
- * reduce all elements in table is sum too large
+ * reduce all elt frequencies in table if sum too large
* return the resulting sum of elements */
static U32 ZSTD_scaleStats(unsigned* table, U32 lastEltIndex, U32 logTarget)
{
@@ -110,7 +128,7 @@ static U32 ZSTD_scaleStats(unsigned* table, U32 lastEltIndex, U32 logTarget)
DEBUGLOG(5, "ZSTD_scaleStats (nbElts=%u, target=%u)", (unsigned)lastEltIndex+1, (unsigned)logTarget);
assert(logTarget < 30);
if (factor <= 1) return prevsum;
- return ZSTD_downscaleStats(table, lastEltIndex, ZSTD_highbit32(factor));
+ return ZSTD_downscaleStats(table, lastEltIndex, ZSTD_highbit32(factor), base_1guaranteed);
}
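
Editor's note: the new base_directive_e distinguishes whether a downscaled statistic may stay at zero. A standalone version of the same step (demo names): with base_0possible, unseen symbols keep a zero frequency; base_1guaranteed keeps every symbol representable.

    #include <stdint.h>

    static uint32_t demo_downscale(unsigned *table, uint32_t lastEltIndex,
                                   uint32_t shift, int base1guaranteed)
    {
        uint32_t s, sum = 0;
        for (s = 0; s <= lastEltIndex; s++) {
            unsigned const base = base1guaranteed ? 1 : (table[s] > 0);
            table[s] = base + (table[s] >> shift);
            sum += table[s];
        }
        return sum;  /* e.g. {9, 0} with shift=2: {3, 1} if guaranteed, {3, 0} otherwise */
    }
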
/* ZSTD_rescaleFreqs() :
@@ -129,18 +147,22 @@ ZSTD_rescaleFreqs(optState_t* const optPtr,
DEBUGLOG(5, "ZSTD_rescaleFreqs (srcSize=%u)", (unsigned)srcSize);
optPtr->priceType = zop_dynamic;
- if (optPtr->litLengthSum == 0) { /* first block : init */
- if (srcSize <= ZSTD_PREDEF_THRESHOLD) { /* heuristic */
- DEBUGLOG(5, "(srcSize <= ZSTD_PREDEF_THRESHOLD) => zop_predef");
+ if (optPtr->litLengthSum == 0) { /* no literals stats collected -> first block assumed -> init */
+
+ /* heuristic: use pre-defined stats for too small inputs */
+ if (srcSize <= ZSTD_PREDEF_THRESHOLD) {
+ DEBUGLOG(5, "srcSize <= %i : use predefined stats", ZSTD_PREDEF_THRESHOLD);
optPtr->priceType = zop_predef;
}
assert(optPtr->symbolCosts != NULL);
if (optPtr->symbolCosts->huf.repeatMode == HUF_repeat_valid) {
- /* huffman table presumed generated by dictionary */
+
+ /* huffman stats covering the full value set : table presumed generated by dictionary */
optPtr->priceType = zop_dynamic;
if (compressedLiterals) {
+ /* generate literals statistics from huffman table */
unsigned lit;
assert(optPtr->litFreq != NULL);
optPtr->litSum = 0;
@@ -188,13 +210,14 @@ ZSTD_rescaleFreqs(optState_t* const optPtr,
optPtr->offCodeSum += optPtr->offCodeFreq[of];
} }
- } else { /* not a dictionary */
+ } else { /* first block, no dictionary */
assert(optPtr->litFreq != NULL);
if (compressedLiterals) {
+ /* base initial cost of literals on direct frequency within src */
unsigned lit = MaxLit;
HIST_count_simple(optPtr->litFreq, &lit, src, srcSize); /* use raw first block to init statistics */
- optPtr->litSum = ZSTD_downscaleStats(optPtr->litFreq, MaxLit, 8);
+ optPtr->litSum = ZSTD_downscaleStats(optPtr->litFreq, MaxLit, 8, base_0possible);
}
{ unsigned const baseLLfreqs[MaxLL+1] = {
@@ -224,10 +247,9 @@ ZSTD_rescaleFreqs(optState_t* const optPtr,
optPtr->offCodeSum = sum_u32(baseOFCfreqs, MaxOff+1);
}
-
}
- } else { /* new block : re-use previous statistics, scaled down */
+ } else { /* new block : scale down accumulated statistics */
if (compressedLiterals)
optPtr->litSum = ZSTD_scaleStats(optPtr->litFreq, MaxLit, 12);
@@ -246,6 +268,7 @@ static U32 ZSTD_rawLiteralsCost(const BYTE* const literals, U32 const litLength,
const optState_t* const optPtr,
int optLevel)
{
+ DEBUGLOG(8, "ZSTD_rawLiteralsCost (%u literals)", litLength);
if (litLength == 0) return 0;
if (!ZSTD_compressedLiterals(optPtr))
@@ -255,11 +278,14 @@ static U32 ZSTD_rawLiteralsCost(const BYTE* const literals, U32 const litLength,
return (litLength*6) * BITCOST_MULTIPLIER; /* 6 bit per literal - no statistic used */
/* dynamic statistics */
- { U32 price = litLength * optPtr->litSumBasePrice;
+ { U32 price = optPtr->litSumBasePrice * litLength;
+ U32 const litPriceMax = optPtr->litSumBasePrice - BITCOST_MULTIPLIER;
U32 u;
+ assert(optPtr->litSumBasePrice >= BITCOST_MULTIPLIER);
for (u=0; u < litLength; u++) {
- assert(WEIGHT(optPtr->litFreq[literals[u]], optLevel) <= optPtr->litSumBasePrice); /* literal cost should never be negative */
- price -= WEIGHT(optPtr->litFreq[literals[u]], optLevel);
+ U32 litPrice = WEIGHT(optPtr->litFreq[literals[u]], optLevel);
+ if (UNLIKELY(litPrice > litPriceMax)) litPrice = litPriceMax;
+ price -= litPrice;
}
return price;
}
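
Editor's note: the clamp added above bounds each literal's individual weight so the subtraction loop cannot drive the running price below zero when a literal is unusually frequent. A reduced sketch of the same guard (demo names):

    static unsigned demo_literals_price(unsigned litSumBasePrice, unsigned mult,
                                        const unsigned *weights, unsigned n)
    {
        unsigned price = litSumBasePrice * n;
        unsigned const cap = litSumBasePrice - mult;  /* per-literal price ceiling */
        unsigned u;
        for (u = 0; u < n; u++) {
            unsigned w = weights[u];
            if (w > cap) w = cap;                     /* hedge against very frequent literals */
            price -= w;
        }
        return price;                                 /* >= n * mult by construction */
    }
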
@@ -272,10 +298,11 @@ static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optP
assert(litLength <= ZSTD_BLOCKSIZE_MAX);
if (optPtr->priceType == zop_predef)
return WEIGHT(litLength, optLevel);
- /* We can't compute the litLength price for sizes >= ZSTD_BLOCKSIZE_MAX
- * because it isn't representable in the zstd format. So instead just
- * call it 1 bit more than ZSTD_BLOCKSIZE_MAX - 1. In this case the block
- * would be all literals.
+
+ /* ZSTD_LLcode() can't compute litLength price for sizes >= ZSTD_BLOCKSIZE_MAX
+ * because it isn't representable in the zstd format.
+ * So instead just pretend it would cost 1 bit more than ZSTD_BLOCKSIZE_MAX - 1.
+ * In such a case, the block would be all literals.
*/
if (litLength == ZSTD_BLOCKSIZE_MAX)
return BITCOST_MULTIPLIER + ZSTD_litLengthPrice(ZSTD_BLOCKSIZE_MAX - 1, optPtr, optLevel);
@@ -289,24 +316,25 @@ static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optP
}
/* ZSTD_getMatchPrice() :
- * Provides the cost of the match part (offset + matchLength) of a sequence
+ * Provides the cost of the match part (offset + matchLength) of a sequence.
* Must be combined with ZSTD_fullLiteralsCost() to get the full cost of a sequence.
- * @offcode : expects a scale where 0,1,2 are repcodes 1-3, and 3+ are real_offsets+2
+ * @offBase : sumtype, representing an offset or a repcode, and using numeric representation of ZSTD_storeSeq()
* @optLevel: when <2, favors small offset for decompression speed (improved cache efficiency)
*/
FORCE_INLINE_TEMPLATE U32
-ZSTD_getMatchPrice(U32 const offcode,
+ZSTD_getMatchPrice(U32 const offBase,
U32 const matchLength,
const optState_t* const optPtr,
int const optLevel)
{
U32 price;
- U32 const offCode = ZSTD_highbit32(STORED_TO_OFFBASE(offcode));
+ U32 const offCode = ZSTD_highbit32(offBase);
U32 const mlBase = matchLength - MINMATCH;
assert(matchLength >= MINMATCH);
- if (optPtr->priceType == zop_predef) /* fixed scheme, do not use statistics */
- return WEIGHT(mlBase, optLevel) + ((16 + offCode) * BITCOST_MULTIPLIER);
+ if (optPtr->priceType == zop_predef) /* fixed scheme, does not use statistics */
+ return WEIGHT(mlBase, optLevel)
+ + ((16 + offCode) * BITCOST_MULTIPLIER); /* emulated offset cost */
/* dynamic statistics */
price = (offCode * BITCOST_MULTIPLIER) + (optPtr->offCodeSumBasePrice - WEIGHT(optPtr->offCodeFreq[offCode], optLevel));
@@ -325,10 +353,10 @@ ZSTD_getMatchPrice(U32 const offcode,
}
/* ZSTD_updateStats() :
- * assumption : literals + litLengtn <= iend */
+ * assumption : literals + litLength <= iend */
static void ZSTD_updateStats(optState_t* const optPtr,
U32 litLength, const BYTE* literals,
- U32 offsetCode, U32 matchLength)
+ U32 offBase, U32 matchLength)
{
/* literals */
if (ZSTD_compressedLiterals(optPtr)) {
@@ -344,8 +372,8 @@ static void ZSTD_updateStats(optState_t* const optPtr,
optPtr->litLengthSum++;
}
- /* offset code : expected to follow storeSeq() numeric representation */
- { U32 const offCode = ZSTD_highbit32(STORED_TO_OFFBASE(offsetCode));
+ /* offset code : follows storeSeq() numeric representation */
+ { U32 const offCode = ZSTD_highbit32(offBase);
assert(offCode <= MaxOff);
optPtr->offCodeFreq[offCode]++;
optPtr->offCodeSum++;
@@ -379,9 +407,11 @@ MEM_STATIC U32 ZSTD_readMINMATCH(const void* memPtr, U32 length)
/* Update hashTable3 up to ip (excluded)
Assumption : always within prefix (i.e. not within extDict) */
-static U32 ZSTD_insertAndFindFirstIndexHash3 (const ZSTD_matchState_t* ms,
- U32* nextToUpdate3,
- const BYTE* const ip)
+static
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+U32 ZSTD_insertAndFindFirstIndexHash3 (const ZSTD_MatchState_t* ms,
+ U32* nextToUpdate3,
+ const BYTE* const ip)
{
U32* const hashTable3 = ms->hashTable3;
U32 const hashLog3 = ms->hashLog3;
@@ -408,8 +438,10 @@ static U32 ZSTD_insertAndFindFirstIndexHash3 (const ZSTD_matchState_t* ms,
* @param ip assumed <= iend-8 .
* @param target The target of ZSTD_updateTree_internal() - we are filling to this position
* @return : nb of positions added */
-static U32 ZSTD_insertBt1(
- const ZSTD_matchState_t* ms,
+static
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+U32 ZSTD_insertBt1(
+ const ZSTD_MatchState_t* ms,
const BYTE* const ip, const BYTE* const iend,
U32 const target,
U32 const mls, const int extDict)
@@ -527,15 +559,16 @@ static U32 ZSTD_insertBt1(
}
FORCE_INLINE_TEMPLATE
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
void ZSTD_updateTree_internal(
- ZSTD_matchState_t* ms,
+ ZSTD_MatchState_t* ms,
const BYTE* const ip, const BYTE* const iend,
const U32 mls, const ZSTD_dictMode_e dictMode)
{
const BYTE* const base = ms->window.base;
U32 const target = (U32)(ip - base);
U32 idx = ms->nextToUpdate;
- DEBUGLOG(6, "ZSTD_updateTree_internal, from %u to %u (dictMode:%u)",
+ DEBUGLOG(7, "ZSTD_updateTree_internal, from %u to %u (dictMode:%u)",
idx, target, dictMode);
while(idx < target) {
@@ -548,20 +581,23 @@ void ZSTD_updateTree_internal(
ms->nextToUpdate = target;
}
-void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend) {
+void ZSTD_updateTree(ZSTD_MatchState_t* ms, const BYTE* ip, const BYTE* iend) {
ZSTD_updateTree_internal(ms, ip, iend, ms->cParams.minMatch, ZSTD_noDict);
}
FORCE_INLINE_TEMPLATE
-U32 ZSTD_insertBtAndGetAllMatches (
- ZSTD_match_t* matches, /* store result (found matches) in this table (presumed large enough) */
- ZSTD_matchState_t* ms,
- U32* nextToUpdate3,
- const BYTE* const ip, const BYTE* const iLimit, const ZSTD_dictMode_e dictMode,
- const U32 rep[ZSTD_REP_NUM],
- U32 const ll0, /* tells if associated literal length is 0 or not. This value must be 0 or 1 */
- const U32 lengthToBeat,
- U32 const mls /* template */)
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+U32
+ZSTD_insertBtAndGetAllMatches (
+ ZSTD_match_t* matches, /* store result (found matches) in this table (presumed large enough) */
+ ZSTD_MatchState_t* ms,
+ U32* nextToUpdate3,
+ const BYTE* const ip, const BYTE* const iLimit,
+ const ZSTD_dictMode_e dictMode,
+ const U32 rep[ZSTD_REP_NUM],
+ const U32 ll0, /* tells if associated literal length is 0 or not. This value must be 0 or 1 */
+ const U32 lengthToBeat,
+ const U32 mls /* template */)
{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
@@ -590,7 +626,7 @@ U32 ZSTD_insertBtAndGetAllMatches (
U32 mnum = 0;
U32 nbCompares = 1U << cParams->searchLog;
- const ZSTD_matchState_t* dms = dictMode == ZSTD_dictMatchState ? ms->dictMatchState : NULL;
+ const ZSTD_MatchState_t* dms = dictMode == ZSTD_dictMatchState ? ms->dictMatchState : NULL;
const ZSTD_compressionParameters* const dmsCParams =
dictMode == ZSTD_dictMatchState ? &dms->cParams : NULL;
const BYTE* const dmsBase = dictMode == ZSTD_dictMatchState ? dms->window.base : NULL;
@@ -629,13 +665,13 @@ U32 ZSTD_insertBtAndGetAllMatches (
assert(curr >= windowLow);
if ( dictMode == ZSTD_extDict
&& ( ((repOffset-1) /*intentional overflow*/ < curr - windowLow) /* equivalent to `curr > repIndex >= windowLow` */
- & (((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */)
+ & (ZSTD_index_overlap_check(dictLimit, repIndex)) )
&& (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dictEnd, prefixStart) + minMatch;
}
if (dictMode == ZSTD_dictMatchState
&& ( ((repOffset-1) /*intentional overflow*/ < curr - (dmsLowLimit + dmsIndexDelta)) /* equivalent to `curr > repIndex >= dmsLowLimit` */
- & ((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */
+ & (ZSTD_index_overlap_check(dictLimit, repIndex)) )
&& (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dmsEnd, prefixStart) + minMatch;
} }
@@ -644,7 +680,7 @@ U32 ZSTD_insertBtAndGetAllMatches (
DEBUGLOG(8, "found repCode %u (ll0:%u, offset:%u) of length %u",
repCode, ll0, repOffset, repLen);
bestLength = repLen;
- matches[mnum].off = STORE_REPCODE(repCode - ll0 + 1); /* expect value between 1 and 3 */
+ matches[mnum].off = REPCODE_TO_OFFBASE(repCode - ll0 + 1); /* expect value between 1 and 3 */
matches[mnum].len = (U32)repLen;
mnum++;
if ( (repLen > sufficient_len)
@@ -673,7 +709,7 @@ U32 ZSTD_insertBtAndGetAllMatches (
bestLength = mlen;
assert(curr > matchIndex3);
assert(mnum==0); /* no prior solution */
- matches[0].off = STORE_OFFSET(curr - matchIndex3);
+ matches[0].off = OFFSET_TO_OFFBASE(curr - matchIndex3);
matches[0].len = (U32)mlen;
mnum = 1;
if ( (mlen > sufficient_len) |
@@ -706,13 +742,13 @@ U32 ZSTD_insertBtAndGetAllMatches (
}
if (matchLength > bestLength) {
- DEBUGLOG(8, "found match of length %u at distance %u (offCode=%u)",
- (U32)matchLength, curr - matchIndex, STORE_OFFSET(curr - matchIndex));
+ DEBUGLOG(8, "found match of length %u at distance %u (offBase=%u)",
+ (U32)matchLength, curr - matchIndex, OFFSET_TO_OFFBASE(curr - matchIndex));
assert(matchEndIdx > matchIndex);
if (matchLength > matchEndIdx - matchIndex)
matchEndIdx = matchIndex + (U32)matchLength;
bestLength = matchLength;
- matches[mnum].off = STORE_OFFSET(curr - matchIndex);
+ matches[mnum].off = OFFSET_TO_OFFBASE(curr - matchIndex);
matches[mnum].len = (U32)matchLength;
mnum++;
if ( (matchLength > ZSTD_OPT_NUM)
@@ -754,12 +790,12 @@ U32 ZSTD_insertBtAndGetAllMatches (
if (matchLength > bestLength) {
matchIndex = dictMatchIndex + dmsIndexDelta;
- DEBUGLOG(8, "found dms match of length %u at distance %u (offCode=%u)",
- (U32)matchLength, curr - matchIndex, STORE_OFFSET(curr - matchIndex));
+ DEBUGLOG(8, "found dms match of length %u at distance %u (offBase=%u)",
+ (U32)matchLength, curr - matchIndex, OFFSET_TO_OFFBASE(curr - matchIndex));
if (matchLength > matchEndIdx - matchIndex)
matchEndIdx = matchIndex + (U32)matchLength;
bestLength = matchLength;
- matches[mnum].off = STORE_OFFSET(curr - matchIndex);
+ matches[mnum].off = OFFSET_TO_OFFBASE(curr - matchIndex);
matches[mnum].len = (U32)matchLength;
mnum++;
if ( (matchLength > ZSTD_OPT_NUM)
@@ -784,7 +820,7 @@ U32 ZSTD_insertBtAndGetAllMatches (
typedef U32 (*ZSTD_getAllMatchesFn)(
ZSTD_match_t*,
- ZSTD_matchState_t*,
+ ZSTD_MatchState_t*,
U32*,
const BYTE*,
const BYTE*,
@@ -792,9 +828,11 @@ typedef U32 (*ZSTD_getAllMatchesFn)(
U32 const ll0,
U32 const lengthToBeat);
-FORCE_INLINE_TEMPLATE U32 ZSTD_btGetAllMatches_internal(
+FORCE_INLINE_TEMPLATE
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+U32 ZSTD_btGetAllMatches_internal(
ZSTD_match_t* matches,
- ZSTD_matchState_t* ms,
+ ZSTD_MatchState_t* ms,
U32* nextToUpdate3,
const BYTE* ip,
const BYTE* const iHighLimit,
@@ -817,7 +855,7 @@ FORCE_INLINE_TEMPLATE U32 ZSTD_btGetAllMatches_internal(
#define GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, mls) \
static U32 ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, mls)( \
ZSTD_match_t* matches, \
- ZSTD_matchState_t* ms, \
+ ZSTD_MatchState_t* ms, \
U32* nextToUpdate3, \
const BYTE* ip, \
const BYTE* const iHighLimit, \
@@ -849,7 +887,7 @@ GEN_ZSTD_BT_GET_ALL_MATCHES(dictMatchState)
}
static ZSTD_getAllMatchesFn
-ZSTD_selectBtGetAllMatches(ZSTD_matchState_t const* ms, ZSTD_dictMode_e const dictMode)
+ZSTD_selectBtGetAllMatches(ZSTD_MatchState_t const* ms, ZSTD_dictMode_e const dictMode)
{
ZSTD_getAllMatchesFn const getAllMatchesFns[3][4] = {
ZSTD_BT_GET_ALL_MATCHES_ARRAY(noDict),
@@ -868,7 +906,7 @@ ZSTD_selectBtGetAllMatches(ZSTD_matchState_t const* ms, ZSTD_dictMode_e const di
/* Struct containing info needed to make decision about ldm inclusion */
typedef struct {
- rawSeqStore_t seqStore; /* External match candidates store for this block */
+ RawSeqStore_t seqStore; /* External match candidates store for this block */
U32 startPosInBlock; /* Start position of the current match candidate */
U32 endPosInBlock; /* End position of the current match candidate */
U32 offset; /* Offset of the match candidate */
@@ -878,7 +916,7 @@ typedef struct {
* Moves forward in @rawSeqStore by @nbBytes,
* which will update the fields 'pos' and 'posInSequence'.
*/
-static void ZSTD_optLdm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes)
+static void ZSTD_optLdm_skipRawSeqStoreBytes(RawSeqStore_t* rawSeqStore, size_t nbBytes)
{
U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes);
while (currPos && rawSeqStore->pos < rawSeqStore->size) {
@@ -935,7 +973,7 @@ ZSTD_opt_getNextMatchAndUpdateSeqStore(ZSTD_optLdm_t* optLdm, U32 currPosInBlock
return;
}
- /* Matches may be < MINMATCH by this process. In that case, we will reject them
+ /* Matches may be < minMatch by this process. In that case, we will reject them
when we are deciding whether or not to add the ldm */
optLdm->startPosInBlock = currPosInBlock + literalsBytesRemaining;
optLdm->endPosInBlock = optLdm->startPosInBlock + matchBytesRemaining;
@@ -957,25 +995,26 @@ ZSTD_opt_getNextMatchAndUpdateSeqStore(ZSTD_optLdm_t* optLdm, U32 currPosInBlock
* into 'matches'. Maintains the correct ordering of 'matches'.
*/
static void ZSTD_optLdm_maybeAddMatch(ZSTD_match_t* matches, U32* nbMatches,
- const ZSTD_optLdm_t* optLdm, U32 currPosInBlock)
+ const ZSTD_optLdm_t* optLdm, U32 currPosInBlock,
+ U32 minMatch)
{
U32 const posDiff = currPosInBlock - optLdm->startPosInBlock;
- /* Note: ZSTD_match_t actually contains offCode and matchLength (before subtracting MINMATCH) */
+ /* Note: ZSTD_match_t actually contains offBase and matchLength (before subtracting MINMATCH) */
U32 const candidateMatchLength = optLdm->endPosInBlock - optLdm->startPosInBlock - posDiff;
/* Ensure that current block position is not outside of the match */
if (currPosInBlock < optLdm->startPosInBlock
|| currPosInBlock >= optLdm->endPosInBlock
- || candidateMatchLength < MINMATCH) {
+ || candidateMatchLength < minMatch) {
return;
}
if (*nbMatches == 0 || ((candidateMatchLength > matches[*nbMatches-1].len) && *nbMatches < ZSTD_OPT_NUM)) {
- U32 const candidateOffCode = STORE_OFFSET(optLdm->offset);
- DEBUGLOG(6, "ZSTD_optLdm_maybeAddMatch(): Adding ldm candidate match (offCode: %u matchLength %u) at block position=%u",
- candidateOffCode, candidateMatchLength, currPosInBlock);
+ U32 const candidateOffBase = OFFSET_TO_OFFBASE(optLdm->offset);
+ DEBUGLOG(6, "ZSTD_optLdm_maybeAddMatch(): Adding ldm candidate match (offBase: %u matchLength %u) at block position=%u",
+ candidateOffBase, candidateMatchLength, currPosInBlock);
matches[*nbMatches].len = candidateMatchLength;
- matches[*nbMatches].off = candidateOffCode;
+ matches[*nbMatches].off = candidateOffBase;
(*nbMatches)++;
}
}
@@ -986,7 +1025,8 @@ static void ZSTD_optLdm_maybeAddMatch(ZSTD_match_t* matches, U32* nbMatches,
static void
ZSTD_optLdm_processMatchCandidate(ZSTD_optLdm_t* optLdm,
ZSTD_match_t* matches, U32* nbMatches,
- U32 currPosInBlock, U32 remainingBytes)
+ U32 currPosInBlock, U32 remainingBytes,
+ U32 minMatch)
{
if (optLdm->seqStore.size == 0 || optLdm->seqStore.pos >= optLdm->seqStore.size) {
return;
@@ -1003,7 +1043,7 @@ ZSTD_optLdm_processMatchCandidate(ZSTD_optLdm_t* optLdm,
}
ZSTD_opt_getNextMatchAndUpdateSeqStore(optLdm, currPosInBlock, remainingBytes);
}
- ZSTD_optLdm_maybeAddMatch(matches, nbMatches, optLdm, currPosInBlock);
+ ZSTD_optLdm_maybeAddMatch(matches, nbMatches, optLdm, currPosInBlock, minMatch);
}
@@ -1011,11 +1051,6 @@ ZSTD_optLdm_processMatchCandidate(ZSTD_optLdm_t* optLdm,
* Optimal parser
*********************************/
-static U32 ZSTD_totalLen(ZSTD_optimal_t sol)
-{
- return sol.litlen + sol.mlen;
-}
-
#if 0 /* debug */
static void
@@ -1033,9 +1068,15 @@ listStats(const U32* table, int lastEltID)
#endif
-FORCE_INLINE_TEMPLATE size_t
-ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
- seqStore_t* seqStore,
+#define LIT_PRICE(_p) (int)ZSTD_rawLiteralsCost(_p, 1, optStatePtr, optLevel)
+#define LL_PRICE(_l) (int)ZSTD_litLengthPrice(_l, optStatePtr, optLevel)
+#define LL_INCPRICE(_l) (LL_PRICE(_l) - LL_PRICE(_l-1))
+
+FORCE_INLINE_TEMPLATE
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+size_t
+ZSTD_compressBlock_opt_generic(ZSTD_MatchState_t* ms,
+ SeqStore_t* seqStore,
U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize,
const int optLevel,
@@ -1059,9 +1100,11 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
ZSTD_optimal_t* const opt = optStatePtr->priceTable;
ZSTD_match_t* const matches = optStatePtr->matchTable;
- ZSTD_optimal_t lastSequence;
+ ZSTD_optimal_t lastStretch;
ZSTD_optLdm_t optLdm;
+ ZSTD_memset(&lastStretch, 0, sizeof(ZSTD_optimal_t));
+
optLdm.seqStore = ms->ldmSeqStore ? *ms->ldmSeqStore : kNullRawSeqStore;
optLdm.endPosInBlock = optLdm.startPosInBlock = optLdm.offset = 0;
ZSTD_opt_getNextMatchAndUpdateSeqStore(&optLdm, (U32)(ip-istart), (U32)(iend-ip));
@@ -1082,103 +1125,140 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
U32 const ll0 = !litlen;
U32 nbMatches = getAllMatches(matches, ms, &nextToUpdate3, ip, iend, rep, ll0, minMatch);
ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches,
- (U32)(ip-istart), (U32)(iend - ip));
- if (!nbMatches) { ip++; continue; }
+ (U32)(ip-istart), (U32)(iend-ip),
+ minMatch);
+ if (!nbMatches) {
+ DEBUGLOG(8, "no match found at cPos %u", (unsigned)(ip-istart));
+ ip++;
+ continue;
+ }
+
+ /* Match found: let's store this solution, and eventually find more candidates.
+ * During this forward pass, @opt is used to store stretches,
+ * defined as "a match followed by N literals".
+ * Note how this is different from a Sequence, which is "N literals followed by a match".
+ * Storing stretches allows us to store different match predecessors
+ * for each literal position part of a literals run. */
/* initialize opt[0] */
- { U32 i ; for (i=0; i<ZSTD_REP_NUM; i++) opt[0].rep[i] = rep[i]; }
- opt[0].mlen = 0; /* means is_a_literal */
+ opt[0].mlen = 0; /* there are only literals so far */
opt[0].litlen = litlen;
- /* We don't need to include the actual price of the literals because
- * it is static for the duration of the forward pass, and is included
- * in every price. We include the literal length to avoid negative
- * prices when we subtract the previous literal length.
+ /* No need to include the actual price of the literals before the first match
+ * because it is static for the duration of the forward pass, and is included
+ * in every subsequent price. But, we include the literal length because
+ * the cost variation of litlen depends on the value of litlen.
*/
- opt[0].price = (int)ZSTD_litLengthPrice(litlen, optStatePtr, optLevel);
+ opt[0].price = LL_PRICE(litlen);
+ ZSTD_STATIC_ASSERT(sizeof(opt[0].rep[0]) == sizeof(rep[0]));
+ ZSTD_memcpy(&opt[0].rep, rep, sizeof(opt[0].rep));
/* large match -> immediate encoding */
{ U32 const maxML = matches[nbMatches-1].len;
- U32 const maxOffcode = matches[nbMatches-1].off;
- DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffCode=%u at cPos=%u => start new series",
- nbMatches, maxML, maxOffcode, (U32)(ip-prefixStart));
+ U32 const maxOffBase = matches[nbMatches-1].off;
+ DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffBase=%u at cPos=%u => start new series",
+ nbMatches, maxML, maxOffBase, (U32)(ip-prefixStart));
if (maxML > sufficient_len) {
- lastSequence.litlen = litlen;
- lastSequence.mlen = maxML;
- lastSequence.off = maxOffcode;
- DEBUGLOG(6, "large match (%u>%u), immediate encoding",
+ lastStretch.litlen = 0;
+ lastStretch.mlen = maxML;
+ lastStretch.off = maxOffBase;
+ DEBUGLOG(6, "large match (%u>%u) => immediate encoding",
maxML, sufficient_len);
cur = 0;
- last_pos = ZSTD_totalLen(lastSequence);
+ last_pos = maxML;
goto _shortestPath;
} }
/* set prices for first matches starting position == 0 */
assert(opt[0].price >= 0);
- { U32 const literalsPrice = (U32)opt[0].price + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
- U32 pos;
+ { U32 pos;
U32 matchNb;
for (pos = 1; pos < minMatch; pos++) {
- opt[pos].price = ZSTD_MAX_PRICE; /* mlen, litlen and price will be fixed during forward scanning */
+ opt[pos].price = ZSTD_MAX_PRICE;
+ opt[pos].mlen = 0;
+ opt[pos].litlen = litlen + pos;
}
for (matchNb = 0; matchNb < nbMatches; matchNb++) {
- U32 const offcode = matches[matchNb].off;
+ U32 const offBase = matches[matchNb].off;
U32 const end = matches[matchNb].len;
for ( ; pos <= end ; pos++ ) {
- U32 const matchPrice = ZSTD_getMatchPrice(offcode, pos, optStatePtr, optLevel);
- U32 const sequencePrice = literalsPrice + matchPrice;
+ int const matchPrice = (int)ZSTD_getMatchPrice(offBase, pos, optStatePtr, optLevel);
+ int const sequencePrice = opt[0].price + matchPrice;
DEBUGLOG(7, "rPos:%u => set initial price : %.2f",
pos, ZSTD_fCost(sequencePrice));
opt[pos].mlen = pos;
- opt[pos].off = offcode;
- opt[pos].litlen = litlen;
- opt[pos].price = (int)sequencePrice;
- } }
+ opt[pos].off = offBase;
+ opt[pos].litlen = 0; /* end of match */
+ opt[pos].price = sequencePrice + LL_PRICE(0);
+ }
+ }
last_pos = pos-1;
+ opt[pos].price = ZSTD_MAX_PRICE;
}
}
/* check further positions */
for (cur = 1; cur <= last_pos; cur++) {
const BYTE* const inr = ip + cur;
- assert(cur < ZSTD_OPT_NUM);
- DEBUGLOG(7, "cPos:%zi==rPos:%u", inr-istart, cur)
+ assert(cur <= ZSTD_OPT_NUM);
+ DEBUGLOG(7, "cPos:%i==rPos:%u", (int)(inr-istart), cur);
/* Fix current position with one literal if cheaper */
- { U32 const litlen = (opt[cur-1].mlen == 0) ? opt[cur-1].litlen + 1 : 1;
+ { U32 const litlen = opt[cur-1].litlen + 1;
int const price = opt[cur-1].price
- + (int)ZSTD_rawLiteralsCost(ip+cur-1, 1, optStatePtr, optLevel)
- + (int)ZSTD_litLengthPrice(litlen, optStatePtr, optLevel)
- - (int)ZSTD_litLengthPrice(litlen-1, optStatePtr, optLevel);
+ + LIT_PRICE(ip+cur-1)
+ + LL_INCPRICE(litlen);
assert(price < 1000000000); /* overflow check */
if (price <= opt[cur].price) {
- DEBUGLOG(7, "cPos:%zi==rPos:%u : better price (%.2f<=%.2f) using literal (ll==%u) (hist:%u,%u,%u)",
- inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price), litlen,
+ ZSTD_optimal_t const prevMatch = opt[cur];
+ DEBUGLOG(7, "cPos:%i==rPos:%u : better price (%.2f<=%.2f) using literal (ll==%u) (hist:%u,%u,%u)",
+ (int)(inr-istart), cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price), litlen,
opt[cur-1].rep[0], opt[cur-1].rep[1], opt[cur-1].rep[2]);
- opt[cur].mlen = 0;
- opt[cur].off = 0;
+ opt[cur] = opt[cur-1];
opt[cur].litlen = litlen;
opt[cur].price = price;
+ if ( (optLevel >= 1) /* additional check only for higher modes */
+ && (prevMatch.litlen == 0) /* replace a match */
+ && (LL_INCPRICE(1) < 0) /* ll1 is cheaper than ll0 */
+ && LIKELY(ip + cur < iend)
+ ) {
+ /* check next position, in case it would be cheaper */
+ int with1literal = prevMatch.price + LIT_PRICE(ip+cur) + LL_INCPRICE(1);
+ int withMoreLiterals = price + LIT_PRICE(ip+cur) + LL_INCPRICE(litlen+1);
+ DEBUGLOG(7, "then at next rPos %u : match+1lit %.2f vs %ulits %.2f",
+ cur+1, ZSTD_fCost(with1literal), litlen+1, ZSTD_fCost(withMoreLiterals));
+ if ( (with1literal < withMoreLiterals)
+ && (with1literal < opt[cur+1].price) ) {
+ /* update offset history - before it disappears */
+ U32 const prev = cur - prevMatch.mlen;
+ Repcodes_t const newReps = ZSTD_newRep(opt[prev].rep, prevMatch.off, opt[prev].litlen==0);
+ assert(cur >= prevMatch.mlen);
+ DEBUGLOG(7, "==> match+1lit is cheaper (%.2f < %.2f) (hist:%u,%u,%u) !",
+ ZSTD_fCost(with1literal), ZSTD_fCost(withMoreLiterals),
+ newReps.rep[0], newReps.rep[1], newReps.rep[2] );
+ opt[cur+1] = prevMatch; /* mlen & offbase */
+ ZSTD_memcpy(opt[cur+1].rep, &newReps, sizeof(Repcodes_t));
+ opt[cur+1].litlen = 1;
+ opt[cur+1].price = with1literal;
+ if (last_pos < cur+1) last_pos = cur+1;
+ }
+ }
} else {
- DEBUGLOG(7, "cPos:%zi==rPos:%u : literal would cost more (%.2f>%.2f) (hist:%u,%u,%u)",
- inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price),
- opt[cur].rep[0], opt[cur].rep[1], opt[cur].rep[2]);
+ DEBUGLOG(7, "cPos:%i==rPos:%u : literal would cost more (%.2f>%.2f)",
+ (int)(inr-istart), cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price));
}
}
- /* Set the repcodes of the current position. We must do it here
- * because we rely on the repcodes of the 2nd to last sequence being
- * correct to set the next chunks repcodes during the backward
- * traversal.
+ /* Offset history is not updated during match comparison.
+ * Do it here, now that the match is selected and confirmed.
*/
- ZSTD_STATIC_ASSERT(sizeof(opt[cur].rep) == sizeof(repcodes_t));
+ ZSTD_STATIC_ASSERT(sizeof(opt[cur].rep) == sizeof(Repcodes_t));
assert(cur >= opt[cur].mlen);
- if (opt[cur].mlen != 0) {
+ if (opt[cur].litlen == 0) {
+ /* just finished a match => alter offset history */
U32 const prev = cur - opt[cur].mlen;
- repcodes_t const newReps = ZSTD_newRep(opt[prev].rep, opt[cur].off, opt[cur].litlen==0);
- ZSTD_memcpy(opt[cur].rep, &newReps, sizeof(repcodes_t));
- } else {
- ZSTD_memcpy(opt[cur].rep, opt[cur - 1].rep, sizeof(repcodes_t));
+ Repcodes_t const newReps = ZSTD_newRep(opt[prev].rep, opt[cur].off, opt[prev].litlen==0);
+ ZSTD_memcpy(opt[cur].rep, &newReps, sizeof(Repcodes_t));
}
/* last match must start at a minimum distance of 8 from oend */
@@ -1188,38 +1268,37 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
if ( (optLevel==0) /*static_test*/
&& (opt[cur+1].price <= opt[cur].price + (BITCOST_MULTIPLIER/2)) ) {
- DEBUGLOG(7, "move to next rPos:%u : price is <=", cur+1);
+ DEBUGLOG(7, "skip current position : next rPos(%u) price is cheaper", cur+1);
continue; /* skip unpromising positions; about ~+6% speed, -0.01 ratio */
}
assert(opt[cur].price >= 0);
- { U32 const ll0 = (opt[cur].mlen != 0);
- U32 const litlen = (opt[cur].mlen == 0) ? opt[cur].litlen : 0;
- U32 const previousPrice = (U32)opt[cur].price;
- U32 const basePrice = previousPrice + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
+ { U32 const ll0 = (opt[cur].litlen == 0);
+ int const previousPrice = opt[cur].price;
+ int const basePrice = previousPrice + LL_PRICE(0);
U32 nbMatches = getAllMatches(matches, ms, &nextToUpdate3, inr, iend, opt[cur].rep, ll0, minMatch);
U32 matchNb;
ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches,
- (U32)(inr-istart), (U32)(iend-inr));
+ (U32)(inr-istart), (U32)(iend-inr),
+ minMatch);
if (!nbMatches) {
DEBUGLOG(7, "rPos:%u : no match found", cur);
continue;
}
- { U32 const maxML = matches[nbMatches-1].len;
- DEBUGLOG(7, "cPos:%zi==rPos:%u, found %u matches, of maxLength=%u",
- inr-istart, cur, nbMatches, maxML);
-
- if ( (maxML > sufficient_len)
- || (cur + maxML >= ZSTD_OPT_NUM) ) {
- lastSequence.mlen = maxML;
- lastSequence.off = matches[nbMatches-1].off;
- lastSequence.litlen = litlen;
- cur -= (opt[cur].mlen==0) ? opt[cur].litlen : 0; /* last sequence is actually only literals, fix cur to last match - note : may underflow, in which case, it's first sequence, and it's okay */
- last_pos = cur + ZSTD_totalLen(lastSequence);
- if (cur > ZSTD_OPT_NUM) cur = 0; /* underflow => first match */
+ { U32 const longestML = matches[nbMatches-1].len;
+ DEBUGLOG(7, "cPos:%i==rPos:%u, found %u matches, of longest ML=%u",
+ (int)(inr-istart), cur, nbMatches, longestML);
+
+ if ( (longestML > sufficient_len)
+ || (cur + longestML >= ZSTD_OPT_NUM)
+ || (ip + cur + longestML >= iend) ) {
+ lastStretch.mlen = longestML;
+ lastStretch.off = matches[nbMatches-1].off;
+ lastStretch.litlen = 0;
+ last_pos = cur + longestML;
goto _shortestPath;
} }
@@ -1230,20 +1309,25 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
U32 const startML = (matchNb>0) ? matches[matchNb-1].len+1 : minMatch;
U32 mlen;
- DEBUGLOG(7, "testing match %u => offCode=%4u, mlen=%2u, llen=%2u",
- matchNb, matches[matchNb].off, lastML, litlen);
+ DEBUGLOG(7, "testing match %u => offBase=%4u, mlen=%2u, llen=%2u",
+ matchNb, matches[matchNb].off, lastML, opt[cur].litlen);
for (mlen = lastML; mlen >= startML; mlen--) { /* scan downward */
U32 const pos = cur + mlen;
- int const price = (int)basePrice + (int)ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel);
+ int const price = basePrice + (int)ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel);
if ((pos > last_pos) || (price < opt[pos].price)) {
DEBUGLOG(7, "rPos:%u (ml=%2u) => new better price (%.2f<%.2f)",
pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price));
- while (last_pos < pos) { opt[last_pos+1].price = ZSTD_MAX_PRICE; last_pos++; } /* fill empty positions */
+ while (last_pos < pos) {
+ /* fill empty positions, for future comparisons */
+ last_pos++;
+ opt[last_pos].price = ZSTD_MAX_PRICE;
+ opt[last_pos].litlen = !0; /* just needs to be != 0, to mean "not an end of match" */
+ }
opt[pos].mlen = mlen;
opt[pos].off = offset;
- opt[pos].litlen = litlen;
+ opt[pos].litlen = 0;
opt[pos].price = price;
} else {
DEBUGLOG(7, "rPos:%u (ml=%2u) => new price is worse (%.2f>=%.2f)",
@@ -1251,55 +1335,89 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
if (optLevel==0) break; /* early update abort; gets ~+10% speed for about -0.01 ratio loss */
}
} } }
+ opt[last_pos+1].price = ZSTD_MAX_PRICE;
} /* for (cur = 1; cur <= last_pos; cur++) */
- lastSequence = opt[last_pos];
- cur = last_pos > ZSTD_totalLen(lastSequence) ? last_pos - ZSTD_totalLen(lastSequence) : 0; /* single sequence, and it starts before `ip` */
- assert(cur < ZSTD_OPT_NUM); /* control overflow*/
+ lastStretch = opt[last_pos];
+ assert(cur >= lastStretch.mlen);
+ cur = last_pos - lastStretch.mlen;
_shortestPath: /* cur, last_pos, best_mlen, best_off have to be set */
assert(opt[0].mlen == 0);
+ assert(last_pos >= lastStretch.mlen);
+ assert(cur == last_pos - lastStretch.mlen);
- /* Set the next chunk's repcodes based on the repcodes of the beginning
- * of the last match, and the last sequence. This avoids us having to
- * update them while traversing the sequences.
- */
- if (lastSequence.mlen != 0) {
- repcodes_t const reps = ZSTD_newRep(opt[cur].rep, lastSequence.off, lastSequence.litlen==0);
- ZSTD_memcpy(rep, &reps, sizeof(reps));
+ if (lastStretch.mlen==0) {
+ /* no solution : all matches have been converted into literals */
+ assert(lastStretch.litlen == (ip - anchor) + last_pos);
+ ip += last_pos;
+ continue;
+ }
+ assert(lastStretch.off > 0);
+
+ /* Update offset history */
+ if (lastStretch.litlen == 0) {
+ /* finishing on a match : update offset history */
+ Repcodes_t const reps = ZSTD_newRep(opt[cur].rep, lastStretch.off, opt[cur].litlen==0);
+ ZSTD_memcpy(rep, &reps, sizeof(Repcodes_t));
} else {
- ZSTD_memcpy(rep, opt[cur].rep, sizeof(repcodes_t));
+ ZSTD_memcpy(rep, lastStretch.rep, sizeof(Repcodes_t));
+ assert(cur >= lastStretch.litlen);
+ cur -= lastStretch.litlen;
}
- { U32 const storeEnd = cur + 1;
+ /* Let's write the shortest path solution.
+ * It is stored in @opt in reverse order,
+ * starting from @storeEnd (==cur+2),
+      * effectively overwriting part of @opt.
+ * Content is changed too:
+ * - So far, @opt stored stretches, aka a match followed by literals
+ * - Now, it will store sequences, aka literals followed by a match
+ */
+ { U32 const storeEnd = cur + 2;
U32 storeStart = storeEnd;
- U32 seqPos = cur;
+ U32 stretchPos = cur;
DEBUGLOG(6, "start reverse traversal (last_pos:%u, cur:%u)",
last_pos, cur); (void)last_pos;
- assert(storeEnd < ZSTD_OPT_NUM);
- DEBUGLOG(6, "last sequence copied into pos=%u (llen=%u,mlen=%u,ofc=%u)",
- storeEnd, lastSequence.litlen, lastSequence.mlen, lastSequence.off);
- opt[storeEnd] = lastSequence;
- while (seqPos > 0) {
- U32 const backDist = ZSTD_totalLen(opt[seqPos]);
+ assert(storeEnd < ZSTD_OPT_SIZE);
+ DEBUGLOG(6, "last stretch copied into pos=%u (llen=%u,mlen=%u,ofc=%u)",
+ storeEnd, lastStretch.litlen, lastStretch.mlen, lastStretch.off);
+ if (lastStretch.litlen > 0) {
+ /* last "sequence" is unfinished: just a bunch of literals */
+ opt[storeEnd].litlen = lastStretch.litlen;
+ opt[storeEnd].mlen = 0;
+ storeStart = storeEnd-1;
+ opt[storeStart] = lastStretch;
+            } else {
+ opt[storeEnd] = lastStretch; /* note: litlen will be fixed */
+ storeStart = storeEnd;
+ }
+ while (1) {
+ ZSTD_optimal_t nextStretch = opt[stretchPos];
+ opt[storeStart].litlen = nextStretch.litlen;
+ DEBUGLOG(6, "selected sequence (llen=%u,mlen=%u,ofc=%u)",
+ opt[storeStart].litlen, opt[storeStart].mlen, opt[storeStart].off);
+ if (nextStretch.mlen == 0) {
+ /* reaching beginning of segment */
+ break;
+ }
storeStart--;
- DEBUGLOG(6, "sequence from rPos=%u copied into pos=%u (llen=%u,mlen=%u,ofc=%u)",
- seqPos, storeStart, opt[seqPos].litlen, opt[seqPos].mlen, opt[seqPos].off);
- opt[storeStart] = opt[seqPos];
- seqPos = (seqPos > backDist) ? seqPos - backDist : 0;
+ opt[storeStart] = nextStretch; /* note: litlen will be fixed */
+ assert(nextStretch.litlen + nextStretch.mlen <= stretchPos);
+ stretchPos -= nextStretch.litlen + nextStretch.mlen;
}
/* save sequences */
- DEBUGLOG(6, "sending selected sequences into seqStore")
+ DEBUGLOG(6, "sending selected sequences into seqStore");
{ U32 storePos;
for (storePos=storeStart; storePos <= storeEnd; storePos++) {
U32 const llen = opt[storePos].litlen;
U32 const mlen = opt[storePos].mlen;
- U32 const offCode = opt[storePos].off;
+ U32 const offBase = opt[storePos].off;
U32 const advance = llen + mlen;
- DEBUGLOG(6, "considering seq starting at %zi, llen=%u, mlen=%u",
- anchor - istart, (unsigned)llen, (unsigned)mlen);
+ DEBUGLOG(6, "considering seq starting at %i, llen=%u, mlen=%u",
+ (int)(anchor - istart), (unsigned)llen, (unsigned)mlen);
if (mlen==0) { /* only literals => must be last "sequence", actually starting a new stream of sequences */
assert(storePos == storeEnd); /* must be last sequence */
@@ -1308,11 +1426,14 @@ _shortestPath: /* cur, last_pos, best_mlen, best_off have to be set */
}
assert(anchor + llen <= iend);
- ZSTD_updateStats(optStatePtr, llen, anchor, offCode, mlen);
- ZSTD_storeSeq(seqStore, llen, anchor, iend, offCode, mlen);
+ ZSTD_updateStats(optStatePtr, llen, anchor, offBase, mlen);
+ ZSTD_storeSeq(seqStore, llen, anchor, iend, offBase, mlen);
anchor += advance;
ip = anchor;
} }
+ DEBUGLOG(7, "new offset history : %u, %u, %u", rep[0], rep[1], rep[2]);
+
+ /* update all costs */
ZSTD_setBasePrices(optStatePtr, optLevel);
}
} /* while (ip < ilimit) */
@@ -1320,42 +1441,51 @@ _shortestPath: /* cur, last_pos, best_mlen, best_off have to be set */
/* Return the last literals size */
return (size_t)(iend - anchor);
}
+#endif /* build exclusions */
+#ifndef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR
static size_t ZSTD_compressBlock_opt0(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize, const ZSTD_dictMode_e dictMode)
{
return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /* optLevel */, dictMode);
}
+#endif
+#ifndef ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR
static size_t ZSTD_compressBlock_opt2(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize, const ZSTD_dictMode_e dictMode)
{
return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /* optLevel */, dictMode);
}
+#endif
+#ifndef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR
size_t ZSTD_compressBlock_btopt(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize)
{
DEBUGLOG(5, "ZSTD_compressBlock_btopt");
return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_noDict);
}
+#endif
+#ifndef ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR
/* ZSTD_initStats_ultra():
* make a first compression pass, just to seed stats with more accurate starting values.
* only works on first block, with no dictionary and no ldm.
- * this function cannot error, hence its contract must be respected.
+ * this function cannot error out, its narrow contract must be respected.
*/
-static void
-ZSTD_initStats_ultra(ZSTD_matchState_t* ms,
- seqStore_t* seqStore,
- U32 rep[ZSTD_REP_NUM],
- const void* src, size_t srcSize)
+static
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+void ZSTD_initStats_ultra(ZSTD_MatchState_t* ms,
+ SeqStore_t* seqStore,
+ U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize)
{
U32 tmpRep[ZSTD_REP_NUM]; /* updated rep codes will sink here */
ZSTD_memcpy(tmpRep, rep, sizeof(tmpRep));
@@ -1368,7 +1498,7 @@ ZSTD_initStats_ultra(ZSTD_matchState_t* ms,
ZSTD_compressBlock_opt2(ms, seqStore, tmpRep, src, srcSize, ZSTD_noDict); /* generate stats into ms->opt*/
- /* invalidate first scan from history */
+ /* invalidate first scan from history, only keep entropy stats */
ZSTD_resetSeqStore(seqStore);
ms->window.base -= srcSize;
ms->window.dictLimit += (U32)srcSize;
@@ -1378,7 +1508,7 @@ ZSTD_initStats_ultra(ZSTD_matchState_t* ms,
}
size_t ZSTD_compressBlock_btultra(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize)
{
DEBUGLOG(5, "ZSTD_compressBlock_btultra (srcSize=%zu)", srcSize);
@@ -1386,16 +1516,16 @@ size_t ZSTD_compressBlock_btultra(
}
size_t ZSTD_compressBlock_btultra2(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize)
{
U32 const curr = (U32)((const BYTE*)src - ms->window.base);
DEBUGLOG(5, "ZSTD_compressBlock_btultra2 (srcSize=%zu)", srcSize);
- /* 2-pass strategy:
+ /* 2-passes strategy:
* this strategy makes a first pass over first block to collect statistics
- * and seed next round's statistics with it.
- * After 1st pass, function forgets everything, and starts a new block.
+ * in order to seed next round's statistics with it.
+ * After 1st pass, function forgets history, and starts a new block.
* Consequently, this can only work if no data has been previously loaded in tables,
* aka, no dictionary, no prefix, no ldm preprocessing.
* The compression ratio gain is generally small (~0.5% on first block),
@@ -1404,42 +1534,47 @@ size_t ZSTD_compressBlock_btultra2(
if ( (ms->opt.litLengthSum==0) /* first block */
&& (seqStore->sequences == seqStore->sequencesStart) /* no ldm */
&& (ms->window.dictLimit == ms->window.lowLimit) /* no dictionary */
- && (curr == ms->window.dictLimit) /* start of frame, nothing already loaded nor skipped */
- && (srcSize > ZSTD_PREDEF_THRESHOLD)
+ && (curr == ms->window.dictLimit) /* start of frame, nothing already loaded nor skipped */
+ && (srcSize > ZSTD_PREDEF_THRESHOLD) /* input large enough to not employ default stats */
) {
ZSTD_initStats_ultra(ms, seqStore, rep, src, srcSize);
}
return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_noDict);
}
+#endif
+#ifndef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR
size_t ZSTD_compressBlock_btopt_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize)
{
return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_dictMatchState);
}
-size_t ZSTD_compressBlock_btultra_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_btopt_extDict(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize)
{
- return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_dictMatchState);
+ return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_extDict);
}
+#endif
-size_t ZSTD_compressBlock_btopt_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+#ifndef ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR
+size_t ZSTD_compressBlock_btultra_dictMatchState(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize)
{
- return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_extDict);
+ return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_dictMatchState);
}
size_t ZSTD_compressBlock_btultra_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize)
{
return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_extDict);
}
+#endif
/* note : no btultra2 variant for extDict nor dictMatchState,
* because btultra2 is not meant to work with dictionaries
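
The reverse-traversal comment in ZSTD_compressBlock_opt_generic above (stretches, i.e. a match followed by literals, re-emitted backwards as sequences, i.e. literals followed by a match) is easier to follow on a toy case. The following standalone sketch mirrors only that conversion loop; the struct, array sizes and the hand-built forward-pass result are invented for illustration and are not taken from zstd_opt.c.

#include <stdio.h>

typedef struct { unsigned litlen; unsigned mlen; } Cell;   /* simplified stand-in for ZSTD_optimal_t */

int main(void)
{
    /* Hand-built forward-pass result for a 10-byte segment :
     * opt[4]  ends a stretch made of a 3-byte match then 1 literal,
     * opt[10] ends a stretch made of a 4-byte match then 2 literals. */
    Cell opt[16] = { {0, 0} };
    Cell seq[16];
    opt[4]  = (Cell){ .litlen = 1, .mlen = 3 };
    opt[10] = (Cell){ .litlen = 2, .mlen = 4 };

    {   unsigned const last_pos = 10;
        Cell const lastStretch = opt[last_pos];
        unsigned cur = last_pos - lastStretch.mlen - lastStretch.litlen;  /* 4 : position before the last stretch */
        unsigned const storeEnd = cur + 2;
        unsigned storeStart = storeEnd;
        unsigned stretchPos = cur;
        unsigned p;

        if (lastStretch.litlen > 0) {
            /* trailing literals become an unfinished, match-less sequence */
            seq[storeEnd] = (Cell){ .litlen = lastStretch.litlen, .mlen = 0 };
            storeStart = storeEnd - 1;
        }
        seq[storeStart] = lastStretch;                /* litlen fixed below */
        while (1) {
            Cell const next = opt[stretchPos];
            seq[storeStart].litlen = next.litlen;     /* literals now precede the match */
            if (next.mlen == 0) break;                /* reached the segment start */
            storeStart--;
            seq[storeStart] = next;                   /* litlen fixed on next iteration */
            stretchPos -= next.litlen + next.mlen;
        }

        for (p = storeStart; p <= storeEnd; p++) {
            if (seq[p].mlen == 0)
                printf("seq %u : %u trailing literal(s)\n", p, seq[p].litlen);
            else
                printf("seq %u : %u literal(s) then a %u-byte match\n",
                       p, seq[p].litlen, seq[p].mlen);
        }
    }
    return 0;
}
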
diff --git a/lib/zstd/compress/zstd_opt.h b/lib/zstd/compress/zstd_opt.h
index 22b862858ba7..fbdc540ec9d1 100644
--- a/lib/zstd/compress/zstd_opt.h
+++ b/lib/zstd/compress/zstd_opt.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -11,40 +12,62 @@
#ifndef ZSTD_OPT_H
#define ZSTD_OPT_H
-
#include "zstd_compress_internal.h"
+#if !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR) \
+ || !defined(ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR) \
+ || !defined(ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR)
/* used in ZSTD_loadDictionaryContent() */
-void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend);
+void ZSTD_updateTree(ZSTD_MatchState_t* ms, const BYTE* ip, const BYTE* iend);
+#endif
+#ifndef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR
size_t ZSTD_compressBlock_btopt(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_btultra(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_btopt_dictMatchState(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_btultra2(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_btopt_extDict(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
+#define ZSTD_COMPRESSBLOCK_BTOPT ZSTD_compressBlock_btopt
+#define ZSTD_COMPRESSBLOCK_BTOPT_DICTMATCHSTATE ZSTD_compressBlock_btopt_dictMatchState
+#define ZSTD_COMPRESSBLOCK_BTOPT_EXTDICT ZSTD_compressBlock_btopt_extDict
+#else
+#define ZSTD_COMPRESSBLOCK_BTOPT NULL
+#define ZSTD_COMPRESSBLOCK_BTOPT_DICTMATCHSTATE NULL
+#define ZSTD_COMPRESSBLOCK_BTOPT_EXTDICT NULL
+#endif
-size_t ZSTD_compressBlock_btopt_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+#ifndef ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR
+size_t ZSTD_compressBlock_btultra(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_btultra_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- void const* src, size_t srcSize);
-
-size_t ZSTD_compressBlock_btopt_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_btultra_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
/* note : no btultra2 variant for extDict nor dictMatchState,
* because btultra2 is not meant to work with dictionaries
* and is only specific for the first block (no prefix) */
+size_t ZSTD_compressBlock_btultra2(
+ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+#define ZSTD_COMPRESSBLOCK_BTULTRA ZSTD_compressBlock_btultra
+#define ZSTD_COMPRESSBLOCK_BTULTRA_DICTMATCHSTATE ZSTD_compressBlock_btultra_dictMatchState
+#define ZSTD_COMPRESSBLOCK_BTULTRA_EXTDICT ZSTD_compressBlock_btultra_extDict
+#define ZSTD_COMPRESSBLOCK_BTULTRA2 ZSTD_compressBlock_btultra2
+#else
+#define ZSTD_COMPRESSBLOCK_BTULTRA NULL
+#define ZSTD_COMPRESSBLOCK_BTULTRA_DICTMATCHSTATE NULL
+#define ZSTD_COMPRESSBLOCK_BTULTRA_EXTDICT NULL
+#define ZSTD_COMPRESSBLOCK_BTULTRA2 NULL
+#endif
#endif /* ZSTD_OPT_H */
diff --git a/lib/zstd/compress/zstd_preSplit.c b/lib/zstd/compress/zstd_preSplit.c
new file mode 100644
index 000000000000..7d9403c9a3bc
--- /dev/null
+++ b/lib/zstd/compress/zstd_preSplit.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#include "../common/compiler.h" /* ZSTD_ALIGNOF */
+#include "../common/mem.h" /* S64 */
+#include "../common/zstd_deps.h" /* ZSTD_memset */
+#include "../common/zstd_internal.h" /* ZSTD_STATIC_ASSERT */
+#include "hist.h" /* HIST_add */
+#include "zstd_preSplit.h"
+
+
+#define BLOCKSIZE_MIN 3500
+#define THRESHOLD_PENALTY_RATE 16
+#define THRESHOLD_BASE (THRESHOLD_PENALTY_RATE - 2)
+#define THRESHOLD_PENALTY 3
+
+#define HASHLENGTH 2
+#define HASHLOG_MAX 10
+#define HASHTABLESIZE (1 << HASHLOG_MAX)
+#define HASHMASK (HASHTABLESIZE - 1)
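+/* 0x9e3779b9 = 2^32 / golden ratio, the classic Fibonacci-hashing multiplier */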
+#define KNUTH 0x9e3779b9
+
+/* for hashLog > 8, hash 2 bytes.
+ * for hashLog == 8, just take the byte, no hashing.
+ * The speed of this method relies on compile-time constant propagation */
+FORCE_INLINE_TEMPLATE unsigned hash2(const void *p, unsigned hashLog)
+{
+ assert(hashLog >= 8);
+ if (hashLog == 8) return (U32)((const BYTE*)p)[0];
+ assert(hashLog <= HASHLOG_MAX);
+ return (U32)(MEM_read16(p)) * KNUTH >> (32 - hashLog);
+}
+
+
+typedef struct {
+ unsigned events[HASHTABLESIZE];
+ size_t nbEvents;
+} Fingerprint;
+typedef struct {
+ Fingerprint pastEvents;
+ Fingerprint newEvents;
+} FPStats;
+
+static void initStats(FPStats* fpstats)
+{
+ ZSTD_memset(fpstats, 0, sizeof(FPStats));
+}
+
+FORCE_INLINE_TEMPLATE void
+addEvents_generic(Fingerprint* fp, const void* src, size_t srcSize, size_t samplingRate, unsigned hashLog)
+{
+ const char* p = (const char*)src;
+ size_t limit = srcSize - HASHLENGTH + 1;
+ size_t n;
+ assert(srcSize >= HASHLENGTH);
+ for (n = 0; n < limit; n+=samplingRate) {
+ fp->events[hash2(p+n, hashLog)]++;
+ }
+ fp->nbEvents += limit/samplingRate;
+}
+
+FORCE_INLINE_TEMPLATE void
+recordFingerprint_generic(Fingerprint* fp, const void* src, size_t srcSize, size_t samplingRate, unsigned hashLog)
+{
+ ZSTD_memset(fp, 0, sizeof(unsigned) * ((size_t)1 << hashLog));
+ fp->nbEvents = 0;
+ addEvents_generic(fp, src, srcSize, samplingRate, hashLog);
+}
+
+typedef void (*RecordEvents_f)(Fingerprint* fp, const void* src, size_t srcSize);
+
+#define FP_RECORD(_rate) ZSTD_recordFingerprint_##_rate
+
+#define ZSTD_GEN_RECORD_FINGERPRINT(_rate, _hSize) \
+ static void FP_RECORD(_rate)(Fingerprint* fp, const void* src, size_t srcSize) \
+ { \
+ recordFingerprint_generic(fp, src, srcSize, _rate, _hSize); \
+ }
+
+ZSTD_GEN_RECORD_FINGERPRINT(1, 10)
+ZSTD_GEN_RECORD_FINGERPRINT(5, 10)
+ZSTD_GEN_RECORD_FINGERPRINT(11, 9)
+ZSTD_GEN_RECORD_FINGERPRINT(43, 8)
+
+
+static U64 abs64(S64 s64) { return (U64)((s64 < 0) ? -s64 : s64); }
+
+static U64 fpDistance(const Fingerprint* fp1, const Fingerprint* fp2, unsigned hashLog)
+{
+ U64 distance = 0;
+ size_t n;
+ assert(hashLog <= HASHLOG_MAX);
+ for (n = 0; n < ((size_t)1 << hashLog); n++) {
+ distance +=
+ abs64((S64)fp1->events[n] * (S64)fp2->nbEvents - (S64)fp2->events[n] * (S64)fp1->nbEvents);
+ }
+ return distance;
+}
+
+/* Compare newEvents with pastEvents
+ * return 1 when considered "too different"
+ */
+static int compareFingerprints(const Fingerprint* ref,
+ const Fingerprint* newfp,
+ int penalty,
+ unsigned hashLog)
+{
+ assert(ref->nbEvents > 0);
+ assert(newfp->nbEvents > 0);
+ { U64 p50 = (U64)ref->nbEvents * (U64)newfp->nbEvents;
+ U64 deviation = fpDistance(ref, newfp, hashLog);
+ U64 threshold = p50 * (U64)(THRESHOLD_BASE + penalty) / THRESHOLD_PENALTY_RATE;
+ return deviation >= threshold;
+ }
+}
+
+static void mergeEvents(Fingerprint* acc, const Fingerprint* newfp)
+{
+ size_t n;
+ for (n = 0; n < HASHTABLESIZE; n++) {
+ acc->events[n] += newfp->events[n];
+ }
+ acc->nbEvents += newfp->nbEvents;
+}
+
+static void flushEvents(FPStats* fpstats)
+{
+ size_t n;
+ for (n = 0; n < HASHTABLESIZE; n++) {
+ fpstats->pastEvents.events[n] = fpstats->newEvents.events[n];
+ }
+ fpstats->pastEvents.nbEvents = fpstats->newEvents.nbEvents;
+ ZSTD_memset(&fpstats->newEvents, 0, sizeof(fpstats->newEvents));
+}
+
+static void removeEvents(Fingerprint* acc, const Fingerprint* slice)
+{
+ size_t n;
+ for (n = 0; n < HASHTABLESIZE; n++) {
+ assert(acc->events[n] >= slice->events[n]);
+ acc->events[n] -= slice->events[n];
+ }
+ acc->nbEvents -= slice->nbEvents;
+}
+
+#define CHUNKSIZE (8 << 10)
+static size_t ZSTD_splitBlock_byChunks(const void* blockStart, size_t blockSize,
+ int level,
+ void* workspace, size_t wkspSize)
+{
+ static const RecordEvents_f records_fs[] = {
+ FP_RECORD(43), FP_RECORD(11), FP_RECORD(5), FP_RECORD(1)
+ };
+ static const unsigned hashParams[] = { 8, 9, 10, 10 };
+ const RecordEvents_f record_f = (assert(0<=level && level<=3), records_fs[level]);
+ FPStats* const fpstats = (FPStats*)workspace;
+ const char* p = (const char*)blockStart;
+ int penalty = THRESHOLD_PENALTY;
+ size_t pos = 0;
+ assert(blockSize == (128 << 10));
+ assert(workspace != NULL);
+ assert((size_t)workspace % ZSTD_ALIGNOF(FPStats) == 0);
+ ZSTD_STATIC_ASSERT(ZSTD_SLIPBLOCK_WORKSPACESIZE >= sizeof(FPStats));
+ assert(wkspSize >= sizeof(FPStats)); (void)wkspSize;
+
+ initStats(fpstats);
+ record_f(&fpstats->pastEvents, p, CHUNKSIZE);
+ for (pos = CHUNKSIZE; pos <= blockSize - CHUNKSIZE; pos += CHUNKSIZE) {
+ record_f(&fpstats->newEvents, p + pos, CHUNKSIZE);
+ if (compareFingerprints(&fpstats->pastEvents, &fpstats->newEvents, penalty, hashParams[level])) {
+ return pos;
+ } else {
+ mergeEvents(&fpstats->pastEvents, &fpstats->newEvents);
+ if (penalty > 0) penalty--;
+ }
+ }
+ assert(pos == blockSize);
+ return blockSize;
+ (void)flushEvents; (void)removeEvents;
+}
+
+/* ZSTD_splitBlock_fromBorders(): very fast strategy :
+ * compare fingerprint from beginning and end of the block,
+ * derive from their difference if it's preferable to split in the middle,
+ * repeat the process a second time, for finer grained decision.
+ * Repeating a 3rd time did not bring improvements, so I stopped at 2.
+ * Benefits are good enough for a cheap heuristic.
+ * More accurate splitting saves more, but speed impact is also more perceptible.
+ * For better accuracy, use more elaborate variant *_byChunks.
+ */
+static size_t ZSTD_splitBlock_fromBorders(const void* blockStart, size_t blockSize,
+ void* workspace, size_t wkspSize)
+{
+#define SEGMENT_SIZE 512
+ FPStats* const fpstats = (FPStats*)workspace;
+ Fingerprint* middleEvents = (Fingerprint*)(void*)((char*)workspace + 512 * sizeof(unsigned));
+ assert(blockSize == (128 << 10));
+ assert(workspace != NULL);
+ assert((size_t)workspace % ZSTD_ALIGNOF(FPStats) == 0);
+ ZSTD_STATIC_ASSERT(ZSTD_SLIPBLOCK_WORKSPACESIZE >= sizeof(FPStats));
+ assert(wkspSize >= sizeof(FPStats)); (void)wkspSize;
+
+ initStats(fpstats);
+ HIST_add(fpstats->pastEvents.events, blockStart, SEGMENT_SIZE);
+ HIST_add(fpstats->newEvents.events, (const char*)blockStart + blockSize - SEGMENT_SIZE, SEGMENT_SIZE);
+ fpstats->pastEvents.nbEvents = fpstats->newEvents.nbEvents = SEGMENT_SIZE;
+ if (!compareFingerprints(&fpstats->pastEvents, &fpstats->newEvents, 0, 8))
+ return blockSize;
+
+ HIST_add(middleEvents->events, (const char*)blockStart + blockSize/2 - SEGMENT_SIZE/2, SEGMENT_SIZE);
+ middleEvents->nbEvents = SEGMENT_SIZE;
+ { U64 const distFromBegin = fpDistance(&fpstats->pastEvents, middleEvents, 8);
+ U64 const distFromEnd = fpDistance(&fpstats->newEvents, middleEvents, 8);
+ U64 const minDistance = SEGMENT_SIZE * SEGMENT_SIZE / 3;
+ if (abs64((S64)distFromBegin - (S64)distFromEnd) < minDistance)
+ return 64 KB;
+ return (distFromBegin > distFromEnd) ? 32 KB : 96 KB;
+ }
+}
+
+size_t ZSTD_splitBlock(const void* blockStart, size_t blockSize,
+ int level,
+ void* workspace, size_t wkspSize)
+{
+ DEBUGLOG(6, "ZSTD_splitBlock (level=%i)", level);
+ assert(0<=level && level<=4);
+ if (level == 0)
+ return ZSTD_splitBlock_fromBorders(blockStart, blockSize, workspace, wkspSize);
+ /* level >= 1*/
+ return ZSTD_splitBlock_byChunks(blockStart, blockSize, level-1, workspace, wkspSize);
+}
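
For intuition about the comparison used above: fpDistance() is an L1 distance between two event histograms, with each count cross-scaled by the other histogram's nbEvents so that different sample counts remain comparable, and compareFingerprints() requests a split once that distance reaches p50 * (THRESHOLD_BASE + penalty) / THRESHOLD_PENALTY_RATE. A small self-contained sketch with invented 4-bucket histograms (the real Fingerprint uses 1 << hashLog buckets) walks through the arithmetic:

#include <stdio.h>

/* toy 4-bucket fingerprints ; names and data are illustrative only */
typedef struct { unsigned events[4]; unsigned nbEvents; } MiniFP;

static unsigned long long dist(const MiniFP* a, const MiniFP* b)
{
    unsigned long long d = 0;
    int n;
    for (n = 0; n < 4; n++) {
        long long diff = (long long)a->events[n] * b->nbEvents
                       - (long long)b->events[n] * a->nbEvents;
        d += (unsigned long long)(diff < 0 ? -diff : diff);
    }
    return d;
}

int main(void)
{
    MiniFP const past = { {10, 10, 10, 10}, 40 };  /* uniform histogram */
    MiniFP const same = { { 5,  5,  5,  5}, 20 };  /* same shape, fewer samples */
    MiniFP const skew = { {20,  0,  0,  0}, 20 };  /* very different shape */

    /* p50 = product of nbEvents ; threshold mirrors THRESHOLD_BASE(14) / THRESHOLD_PENALTY_RATE(16),
     * with penalty == 0 as in ZSTD_splitBlock_fromBorders() */
    unsigned long long const p50 = (unsigned long long)past.nbEvents * same.nbEvents;  /* 800 */
    unsigned long long const threshold = p50 * 14 / 16;                                /* 700 */

    printf("same shape : distance %llu vs threshold %llu -> split=%d\n",
           dist(&past, &same), threshold, dist(&past, &same) >= threshold);   /* 0    -> no split */
    printf("skewed     : distance %llu vs threshold %llu -> split=%d\n",
           dist(&past, &skew), threshold, dist(&past, &skew) >= threshold);   /* 1200 -> split */
    return 0;
}
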
diff --git a/lib/zstd/compress/zstd_preSplit.h b/lib/zstd/compress/zstd_preSplit.h
new file mode 100644
index 000000000000..f98f797fe191
--- /dev/null
+++ b/lib/zstd/compress/zstd_preSplit.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_PRESPLIT_H
+#define ZSTD_PRESPLIT_H
+
+#include <linux/types.h> /* size_t */
+
+#define ZSTD_SLIPBLOCK_WORKSPACESIZE 8208
+
+/* ZSTD_splitBlock():
+ * @level must be a value between 0 and 4.
+ * higher levels spend more energy to detect block boundaries.
+ * @workspace must be aligned for size_t.
+ * @wkspSize must be >= ZSTD_SLIPBLOCK_WORKSPACESIZE
+ * note:
+ * For the time being, this function only accepts full 128 KB blocks.
+ * Therefore, @blockSize must be == 128 KB.
+ * While this could be extended to smaller sizes in the future,
+ * it is not yet clear if this would be useful. TBD.
+ */
+size_t ZSTD_splitBlock(const void* blockStart, size_t blockSize,
+ int level,
+ void* workspace, size_t wkspSize);
+
+#endif /* ZSTD_PRESPLIT_H */
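
A hypothetical caller-side sketch of the contract documented above; only ZSTD_splitBlock() and ZSTD_SLIPBLOCK_WORKSPACESIZE come from this header, the wrapper name and parameters are invented:

#include "zstd_preSplit.h"

/* Hypothetical wrapper : propose a cut point inside a full 128 KB block.
 * @wksp must offer at least ZSTD_SLIPBLOCK_WORKSPACESIZE bytes, aligned for size_t.
 * A return value equal to blockSize means "do not split this block". */
static size_t pick_split_point(const void* block, int level,
                               void* wksp, size_t wkspSize)
{
    size_t const blockSize = 128 * 1024;   /* only full 128 KB blocks are accepted for now */
    return ZSTD_splitBlock(block, blockSize, level, wksp, wkspSize);
}
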
diff --git a/lib/zstd/decompress/huf_decompress.c b/lib/zstd/decompress/huf_decompress.c
index 60958afebc41..ac8b87f48f84 100644
--- a/lib/zstd/decompress/huf_decompress.c
+++ b/lib/zstd/decompress/huf_decompress.c
@@ -1,7 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/* ******************************************************************
* huff0 huffman decoder,
* part of Finite State Entropy library
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
*
* You can contact the author at :
* - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
@@ -19,10 +20,10 @@
#include "../common/compiler.h"
#include "../common/bitstream.h" /* BIT_* */
#include "../common/fse.h" /* to compress headers */
-#define HUF_STATIC_LINKING_ONLY
#include "../common/huf.h"
#include "../common/error_private.h"
#include "../common/zstd_internal.h"
+#include "../common/bits.h" /* ZSTD_highbit32, ZSTD_countTrailingZeros64 */
/* **************************************************************
* Constants
@@ -34,6 +35,12 @@
* Macros
****************************************************************/
+#ifdef HUF_DISABLE_FAST_DECODE
+# define HUF_ENABLE_FAST_DECODE 0
+#else
+# define HUF_ENABLE_FAST_DECODE 1
+#endif
+
/* These two optional macros force the use one way or another of the two
* Huffman decompression implementations. You can't force in both directions
* at the same time.
@@ -43,27 +50,25 @@
#error "Cannot force the use of the X1 and X2 decoders at the same time!"
#endif
-#if ZSTD_ENABLE_ASM_X86_64_BMI2 && DYNAMIC_BMI2
-# define HUF_ASM_X86_64_BMI2_ATTRS BMI2_TARGET_ATTRIBUTE
+/* When DYNAMIC_BMI2 is enabled, fast decoders are only called when bmi2 is
+ * supported at runtime, so we can add the BMI2 target attribute.
+ * When it is disabled, we will still get BMI2 if it is enabled statically.
+ */
+#if DYNAMIC_BMI2
+# define HUF_FAST_BMI2_ATTRS BMI2_TARGET_ATTRIBUTE
#else
-# define HUF_ASM_X86_64_BMI2_ATTRS
+# define HUF_FAST_BMI2_ATTRS
#endif
#define HUF_EXTERN_C
#define HUF_ASM_DECL HUF_EXTERN_C
-#if DYNAMIC_BMI2 || (ZSTD_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__))
+#if DYNAMIC_BMI2
# define HUF_NEED_BMI2_FUNCTION 1
#else
# define HUF_NEED_BMI2_FUNCTION 0
#endif
-#if !(ZSTD_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__))
-# define HUF_NEED_DEFAULT_FUNCTION 1
-#else
-# define HUF_NEED_DEFAULT_FUNCTION 0
-#endif
-
/* **************************************************************
* Error Management
****************************************************************/
@@ -80,6 +85,11 @@
/* **************************************************************
* BMI2 Variant Wrappers
****************************************************************/
+typedef size_t (*HUF_DecompressUsingDTableFn)(void *dst, size_t dstSize,
+ const void *cSrc,
+ size_t cSrcSize,
+ const HUF_DTable *DTable);
+
#if DYNAMIC_BMI2
#define HUF_DGEN(fn) \
@@ -101,9 +111,9 @@
} \
\
static size_t fn(void* dst, size_t dstSize, void const* cSrc, \
- size_t cSrcSize, HUF_DTable const* DTable, int bmi2) \
+ size_t cSrcSize, HUF_DTable const* DTable, int flags) \
{ \
- if (bmi2) { \
+ if (flags & HUF_flags_bmi2) { \
return fn##_bmi2(dst, dstSize, cSrc, cSrcSize, DTable); \
} \
return fn##_default(dst, dstSize, cSrc, cSrcSize, DTable); \
@@ -113,9 +123,9 @@
#define HUF_DGEN(fn) \
static size_t fn(void* dst, size_t dstSize, void const* cSrc, \
- size_t cSrcSize, HUF_DTable const* DTable, int bmi2) \
+ size_t cSrcSize, HUF_DTable const* DTable, int flags) \
{ \
- (void)bmi2; \
+ (void)flags; \
return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \
}
@@ -134,43 +144,66 @@ static DTableDesc HUF_getDTableDesc(const HUF_DTable* table)
return dtd;
}
-#if ZSTD_ENABLE_ASM_X86_64_BMI2
-
-static size_t HUF_initDStream(BYTE const* ip) {
+static size_t HUF_initFastDStream(BYTE const* ip) {
BYTE const lastByte = ip[7];
- size_t const bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;
+ size_t const bitsConsumed = lastByte ? 8 - ZSTD_highbit32(lastByte) : 0;
size_t const value = MEM_readLEST(ip) | 1;
assert(bitsConsumed <= 8);
+ assert(sizeof(size_t) == 8);
return value << bitsConsumed;
}
+
+
+/*
+ * The input/output arguments to the Huffman fast decoding loop:
+ *
+ * ip [in/out] - The input pointers, must be updated to reflect what is consumed.
+ * op [in/out] - The output pointers, must be updated to reflect what is written.
+ * bits [in/out] - The bitstream containers, must be updated to reflect the current state.
+ * dt [in] - The decoding table.
+ * ilowest [in] - The beginning of the valid range of the input. Decoders may read
+ * down to this pointer. It may be below iend[0].
+ * oend [in] - The end of the output stream. op[3] must not cross oend.
+ * iend [in] - The end of each input stream. ip[i] may cross iend[i],
+ * as long as it is above ilowest, but that indicates corruption.
+ */
typedef struct {
BYTE const* ip[4];
BYTE* op[4];
U64 bits[4];
void const* dt;
- BYTE const* ilimit;
+ BYTE const* ilowest;
BYTE* oend;
BYTE const* iend[4];
-} HUF_DecompressAsmArgs;
+} HUF_DecompressFastArgs;
+
+typedef void (*HUF_DecompressFastLoopFn)(HUF_DecompressFastArgs*);
/*
- * Initializes args for the asm decoding loop.
- * @returns 0 on success
- * 1 if the fallback implementation should be used.
+ * Initializes args for the fast decoding loop.
+ * @returns 1 on success
+ * 0 if the fallback implementation should be used.
* Or an error code on failure.
*/
-static size_t HUF_DecompressAsmArgs_init(HUF_DecompressAsmArgs* args, void* dst, size_t dstSize, void const* src, size_t srcSize, const HUF_DTable* DTable)
+static size_t HUF_DecompressFastArgs_init(HUF_DecompressFastArgs* args, void* dst, size_t dstSize, void const* src, size_t srcSize, const HUF_DTable* DTable)
{
void const* dt = DTable + 1;
U32 const dtLog = HUF_getDTableDesc(DTable).tableLog;
- const BYTE* const ilimit = (const BYTE*)src + 6 + 8;
+ const BYTE* const istart = (const BYTE*)src;
- BYTE* const oend = (BYTE*)dst + dstSize;
+ BYTE* const oend = ZSTD_maybeNullPtrAdd((BYTE*)dst, dstSize);
- /* The following condition is false on x32 platform,
- * but HUF_asm is not compatible with this ABI */
- if (!(MEM_isLittleEndian() && !MEM_32bits())) return 1;
+ /* The fast decoding loop assumes 64-bit little-endian.
+ * This condition is false on x32.
+ */
+ if (!MEM_isLittleEndian() || MEM_32bits())
+ return 0;
+
+ /* Avoid nullptr addition */
+ if (dstSize == 0)
+ return 0;
+ assert(dst != NULL);
/* strict minimum : jump table + 1 byte per stream */
if (srcSize < 10)
@@ -181,11 +214,10 @@ static size_t HUF_DecompressAsmArgs_init(HUF_DecompressAsmArgs* args, void* dst,
* On small inputs we don't have enough data to trigger the fast loop, so use the old decoder.
*/
if (dtLog != HUF_DECODER_FAST_TABLELOG)
- return 1;
+ return 0;
/* Read the jump table. */
{
- const BYTE* const istart = (const BYTE*)src;
size_t const length1 = MEM_readLE16(istart);
size_t const length2 = MEM_readLE16(istart+2);
size_t const length3 = MEM_readLE16(istart+4);
@@ -195,13 +227,11 @@ static size_t HUF_DecompressAsmArgs_init(HUF_DecompressAsmArgs* args, void* dst,
args->iend[2] = args->iend[1] + length2;
args->iend[3] = args->iend[2] + length3;
- /* HUF_initDStream() requires this, and this small of an input
+ /* HUF_initFastDStream() requires this, and this small of an input
* won't benefit from the ASM loop anyways.
- * length1 must be >= 16 so that ip[0] >= ilimit before the loop
- * starts.
*/
- if (length1 < 16 || length2 < 8 || length3 < 8 || length4 < 8)
- return 1;
+ if (length1 < 8 || length2 < 8 || length3 < 8 || length4 < 8)
+ return 0;
if (length4 > srcSize) return ERROR(corruption_detected); /* overflow */
}
/* ip[] contains the position that is currently loaded into bits[]. */
@@ -218,7 +248,7 @@ static size_t HUF_DecompressAsmArgs_init(HUF_DecompressAsmArgs* args, void* dst,
/* No point to call the ASM loop for tiny outputs. */
if (args->op[3] >= oend)
- return 1;
+ return 0;
/* bits[] is the bit container.
* It is read from the MSB down to the LSB.
@@ -227,24 +257,25 @@ static size_t HUF_DecompressAsmArgs_init(HUF_DecompressAsmArgs* args, void* dst,
* set, so that CountTrailingZeros(bits[]) can be used
* to count how many bits we've consumed.
*/
- args->bits[0] = HUF_initDStream(args->ip[0]);
- args->bits[1] = HUF_initDStream(args->ip[1]);
- args->bits[2] = HUF_initDStream(args->ip[2]);
- args->bits[3] = HUF_initDStream(args->ip[3]);
-
- /* If ip[] >= ilimit, it is guaranteed to be safe to
- * reload bits[]. It may be beyond its section, but is
- * guaranteed to be valid (>= istart).
- */
- args->ilimit = ilimit;
+ args->bits[0] = HUF_initFastDStream(args->ip[0]);
+ args->bits[1] = HUF_initFastDStream(args->ip[1]);
+ args->bits[2] = HUF_initFastDStream(args->ip[2]);
+ args->bits[3] = HUF_initFastDStream(args->ip[3]);
+
+ /* The decoders must be sure to never read beyond ilowest.
+ * This is lower than iend[0], but allowing decoders to read
+ * down to ilowest can allow an extra iteration or two in the
+ * fast loop.
+ */
+ args->ilowest = istart;
args->oend = oend;
args->dt = dt;
- return 0;
+ return 1;
}
-static size_t HUF_initRemainingDStream(BIT_DStream_t* bit, HUF_DecompressAsmArgs const* args, int stream, BYTE* segmentEnd)
+static size_t HUF_initRemainingDStream(BIT_DStream_t* bit, HUF_DecompressFastArgs const* args, int stream, BYTE* segmentEnd)
{
/* Validate that we haven't overwritten. */
if (args->op[stream] > segmentEnd)
@@ -258,15 +289,33 @@ static size_t HUF_initRemainingDStream(BIT_DStream_t* bit, HUF_DecompressAsmArgs
return ERROR(corruption_detected);
/* Construct the BIT_DStream_t. */
- bit->bitContainer = MEM_readLE64(args->ip[stream]);
- bit->bitsConsumed = ZSTD_countTrailingZeros((size_t)args->bits[stream]);
- bit->start = (const char*)args->iend[0];
+ assert(sizeof(size_t) == 8);
+ bit->bitContainer = MEM_readLEST(args->ip[stream]);
+ bit->bitsConsumed = ZSTD_countTrailingZeros64(args->bits[stream]);
+ bit->start = (const char*)args->ilowest;
bit->limitPtr = bit->start + sizeof(size_t);
bit->ptr = (const char*)args->ip[stream];
return 0;
}
-#endif
+
+/* Calls X(N) for each stream 0, 1, 2, 3. */
+#define HUF_4X_FOR_EACH_STREAM(X) \
+ do { \
+ X(0); \
+ X(1); \
+ X(2); \
+ X(3); \
+ } while (0)
+
+/* Calls X(N, var) for each stream 0, 1, 2, 3. */
+#define HUF_4X_FOR_EACH_STREAM_WITH_VAR(X, var) \
+ do { \
+ X(0, (var)); \
+ X(1, (var)); \
+ X(2, (var)); \
+ X(3, (var)); \
+ } while (0)
#ifndef HUF_FORCE_DECOMPRESS_X2
@@ -283,10 +332,11 @@ typedef struct { BYTE nbBits; BYTE byte; } HUF_DEltX1; /* single-symbol decodi
static U64 HUF_DEltX1_set4(BYTE symbol, BYTE nbBits) {
U64 D4;
if (MEM_isLittleEndian()) {
- D4 = (symbol << 8) + nbBits;
+ D4 = (U64)((symbol << 8) + nbBits);
} else {
- D4 = symbol + (nbBits << 8);
+ D4 = (U64)(symbol + (nbBits << 8));
}
+ assert(D4 < (1U << 16));
D4 *= 0x0001000100010001ULL;
return D4;
}
@@ -329,13 +379,7 @@ typedef struct {
BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1];
} HUF_ReadDTableX1_Workspace;
-
-size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize)
-{
- return HUF_readDTableX1_wksp_bmi2(DTable, src, srcSize, workSpace, wkspSize, /* bmi2 */ 0);
-}
-
-size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2)
+size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int flags)
{
U32 tableLog = 0;
U32 nbSymbols = 0;
@@ -350,7 +394,7 @@ size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t sr
DEBUG_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable));
/* ZSTD_memset(huffWeight, 0, sizeof(huffWeight)); */ /* is not necessary, even though some analyzer complain ... */
- iSize = HUF_readStats_wksp(wksp->huffWeight, HUF_SYMBOLVALUE_MAX + 1, wksp->rankVal, &nbSymbols, &tableLog, src, srcSize, wksp->statsWksp, sizeof(wksp->statsWksp), bmi2);
+ iSize = HUF_readStats_wksp(wksp->huffWeight, HUF_SYMBOLVALUE_MAX + 1, wksp->rankVal, &nbSymbols, &tableLog, src, srcSize, wksp->statsWksp, sizeof(wksp->statsWksp), flags);
if (HUF_isError(iSize)) return iSize;
@@ -377,9 +421,8 @@ size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t sr
* rankStart[0] is not filled because there are no entries in the table for
* weight 0.
*/
- {
- int n;
- int nextRankStart = 0;
+ { int n;
+ U32 nextRankStart = 0;
int const unroll = 4;
int const nLimit = (int)nbSymbols - unroll + 1;
for (n=0; n<(int)tableLog+1; n++) {
@@ -406,10 +449,9 @@ size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t sr
* We can switch based on the length to a different inner loop which is
* optimized for that particular case.
*/
- {
- U32 w;
- int symbol=wksp->rankVal[0];
- int rankStart=0;
+ { U32 w;
+ int symbol = wksp->rankVal[0];
+ int rankStart = 0;
for (w=1; w<tableLog+1; ++w) {
int const symbolCount = wksp->rankVal[w];
int const length = (1 << w) >> 1;
@@ -483,15 +525,19 @@ HUF_decodeSymbolX1(BIT_DStream_t* Dstream, const HUF_DEltX1* dt, const U32 dtLog
}
#define HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr) \
- *ptr++ = HUF_decodeSymbolX1(DStreamPtr, dt, dtLog)
+ do { *ptr++ = HUF_decodeSymbolX1(DStreamPtr, dt, dtLog); } while (0)
-#define HUF_DECODE_SYMBOLX1_1(ptr, DStreamPtr) \
- if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
- HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr)
+#define HUF_DECODE_SYMBOLX1_1(ptr, DStreamPtr) \
+ do { \
+ if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
+ HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr); \
+ } while (0)
-#define HUF_DECODE_SYMBOLX1_2(ptr, DStreamPtr) \
- if (MEM_64bits()) \
- HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr)
+#define HUF_DECODE_SYMBOLX1_2(ptr, DStreamPtr) \
+ do { \
+ if (MEM_64bits()) \
+ HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr); \
+ } while (0)
HINT_INLINE size_t
HUF_decodeStreamX1(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX1* const dt, const U32 dtLog)
@@ -519,7 +565,7 @@ HUF_decodeStreamX1(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, cons
while (p < pEnd)
HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
- return pEnd-pStart;
+ return (size_t)(pEnd-pStart);
}
FORCE_INLINE_TEMPLATE size_t
@@ -529,7 +575,7 @@ HUF_decompress1X1_usingDTable_internal_body(
const HUF_DTable* DTable)
{
BYTE* op = (BYTE*)dst;
- BYTE* const oend = op + dstSize;
+ BYTE* const oend = ZSTD_maybeNullPtrAdd(op, dstSize);
const void* dtPtr = DTable + 1;
const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr;
BIT_DStream_t bitD;
@@ -545,6 +591,10 @@ HUF_decompress1X1_usingDTable_internal_body(
return dstSize;
}
+/* HUF_decompress4X1_usingDTable_internal_body():
+ * Conditions :
+ * @dstSize >= 6
+ */
FORCE_INLINE_TEMPLATE size_t
HUF_decompress4X1_usingDTable_internal_body(
void* dst, size_t dstSize,
@@ -553,6 +603,7 @@ HUF_decompress4X1_usingDTable_internal_body(
{
/* Check */
if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
+ if (dstSize < 6) return ERROR(corruption_detected); /* stream 4-split doesn't work */
{ const BYTE* const istart = (const BYTE*) cSrc;
BYTE* const ostart = (BYTE*) dst;
@@ -588,6 +639,7 @@ HUF_decompress4X1_usingDTable_internal_body(
if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
if (opStart4 > oend) return ERROR(corruption_detected); /* overflow */
+ assert(dstSize >= 6); /* validated above */
CHECK_F( BIT_initDStream(&bitD1, istart1, length1) );
CHECK_F( BIT_initDStream(&bitD2, istart2, length2) );
CHECK_F( BIT_initDStream(&bitD3, istart3, length3) );
@@ -650,52 +702,173 @@ size_t HUF_decompress4X1_usingDTable_internal_bmi2(void* dst, size_t dstSize, vo
}
#endif
-#if HUF_NEED_DEFAULT_FUNCTION
static
size_t HUF_decompress4X1_usingDTable_internal_default(void* dst, size_t dstSize, void const* cSrc,
size_t cSrcSize, HUF_DTable const* DTable) {
return HUF_decompress4X1_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable);
}
-#endif
#if ZSTD_ENABLE_ASM_X86_64_BMI2
-HUF_ASM_DECL void HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop(HUF_DecompressAsmArgs* args) ZSTDLIB_HIDDEN;
+HUF_ASM_DECL void HUF_decompress4X1_usingDTable_internal_fast_asm_loop(HUF_DecompressFastArgs* args) ZSTDLIB_HIDDEN;
+
+#endif
+
+static HUF_FAST_BMI2_ATTRS
+void HUF_decompress4X1_usingDTable_internal_fast_c_loop(HUF_DecompressFastArgs* args)
+{
+ U64 bits[4];
+ BYTE const* ip[4];
+ BYTE* op[4];
+ U16 const* const dtable = (U16 const*)args->dt;
+ BYTE* const oend = args->oend;
+ BYTE const* const ilowest = args->ilowest;
+
+ /* Copy the arguments to local variables */
+ ZSTD_memcpy(&bits, &args->bits, sizeof(bits));
+ ZSTD_memcpy((void*)(&ip), &args->ip, sizeof(ip));
+ ZSTD_memcpy(&op, &args->op, sizeof(op));
+
+ assert(MEM_isLittleEndian());
+ assert(!MEM_32bits());
+
+ for (;;) {
+ BYTE* olimit;
+ int stream;
+
+ /* Assert loop preconditions */
+#ifndef NDEBUG
+ for (stream = 0; stream < 4; ++stream) {
+ assert(op[stream] <= (stream == 3 ? oend : op[stream + 1]));
+ assert(ip[stream] >= ilowest);
+ }
+#endif
+ /* Compute olimit */
+ {
+ /* Each iteration produces 5 output symbols per stream */
+ size_t const oiters = (size_t)(oend - op[3]) / 5;
+ /* Each iteration consumes up to 11 bits * 5 = 55 bits < 7 bytes
+ * per stream.
+ */
+ size_t const iiters = (size_t)(ip[0] - ilowest) / 7;
+ /* We can safely run iters iterations before running bounds checks */
+ size_t const iters = MIN(oiters, iiters);
+ size_t const symbols = iters * 5;
+
+ /* We can simply check that op[3] < olimit, instead of checking all
+ * of our bounds, since we can't hit the other bounds until we've run
+ * iters iterations, which only happens when op[3] == olimit.
+ */
+ olimit = op[3] + symbols;
+
+ /* Exit fast decoding loop once we reach the end. */
+ if (op[3] == olimit)
+ break;
+
+ /* Exit the decoding loop if any input pointer has crossed the
+ * previous one. This indicates corruption, and a precondition
+ * to our loop is that ip[i] >= ip[0].
+ */
+ for (stream = 1; stream < 4; ++stream) {
+ if (ip[stream] < ip[stream - 1])
+ goto _out;
+ }
+ }
+
+#ifndef NDEBUG
+ for (stream = 1; stream < 4; ++stream) {
+ assert(ip[stream] >= ip[stream - 1]);
+ }
+#endif
+
+#define HUF_4X1_DECODE_SYMBOL(_stream, _symbol) \
+ do { \
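+        /* the top 11 bits (64 - HUF_DECODER_FAST_TABLELOG = 53) select the dtable entry */ \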
+ int const index = (int)(bits[(_stream)] >> 53); \
+ int const entry = (int)dtable[index]; \
+ bits[(_stream)] <<= (entry & 0x3F); \
+ op[(_stream)][(_symbol)] = (BYTE)((entry >> 8) & 0xFF); \
+ } while (0)
+
+#define HUF_4X1_RELOAD_STREAM(_stream) \
+ do { \
+ int const ctz = ZSTD_countTrailingZeros64(bits[(_stream)]); \
+ int const nbBits = ctz & 7; \
+ int const nbBytes = ctz >> 3; \
+ op[(_stream)] += 5; \
+ ip[(_stream)] -= nbBytes; \
+ bits[(_stream)] = MEM_read64(ip[(_stream)]) | 1; \
+ bits[(_stream)] <<= nbBits; \
+ } while (0)
+
+ /* Manually unroll the loop because compilers don't consistently
+ * unroll the inner loops, which destroys performance.
+ */
+ do {
+ /* Decode 5 symbols in each of the 4 streams */
+ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X1_DECODE_SYMBOL, 0);
+ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X1_DECODE_SYMBOL, 1);
+ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X1_DECODE_SYMBOL, 2);
+ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X1_DECODE_SYMBOL, 3);
+ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X1_DECODE_SYMBOL, 4);
+
+        /* Reload each of the 4 bitstreams */
+ HUF_4X_FOR_EACH_STREAM(HUF_4X1_RELOAD_STREAM);
+ } while (op[3] < olimit);
+
+#undef HUF_4X1_DECODE_SYMBOL
+#undef HUF_4X1_RELOAD_STREAM
+ }
-static HUF_ASM_X86_64_BMI2_ATTRS
+_out:
+
+ /* Save the final values of each of the state variables back to args. */
+ ZSTD_memcpy(&args->bits, &bits, sizeof(bits));
+ ZSTD_memcpy((void*)(&args->ip), &ip, sizeof(ip));
+ ZSTD_memcpy(&args->op, &op, sizeof(op));
+}
+
+/*
+ * @returns @p dstSize on success (>= 6)
+ * 0 if the fallback implementation should be used
+ * An error if an error occurred
+ */
+static HUF_FAST_BMI2_ATTRS
size_t
-HUF_decompress4X1_usingDTable_internal_bmi2_asm(
+HUF_decompress4X1_usingDTable_internal_fast(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
- const HUF_DTable* DTable)
+ const HUF_DTable* DTable,
+ HUF_DecompressFastLoopFn loopFn)
{
void const* dt = DTable + 1;
- const BYTE* const iend = (const BYTE*)cSrc + 6;
- BYTE* const oend = (BYTE*)dst + dstSize;
- HUF_DecompressAsmArgs args;
- {
- size_t const ret = HUF_DecompressAsmArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable);
- FORWARD_IF_ERROR(ret, "Failed to init asm args");
- if (ret != 0)
- return HUF_decompress4X1_usingDTable_internal_bmi2(dst, dstSize, cSrc, cSrcSize, DTable);
+ BYTE const* const ilowest = (BYTE const*)cSrc;
+ BYTE* const oend = ZSTD_maybeNullPtrAdd((BYTE*)dst, dstSize);
+ HUF_DecompressFastArgs args;
+ { size_t const ret = HUF_DecompressFastArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable);
+ FORWARD_IF_ERROR(ret, "Failed to init fast loop args");
+ if (ret == 0)
+ return 0;
}
- assert(args.ip[0] >= args.ilimit);
- HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop(&args);
+ assert(args.ip[0] >= args.ilowest);
+ loopFn(&args);
- /* Our loop guarantees that ip[] >= ilimit and that we haven't
+ /* Our loop guarantees that ip[] >= ilowest and that we haven't
* overwritten any op[].
*/
- assert(args.ip[0] >= iend);
- assert(args.ip[1] >= iend);
- assert(args.ip[2] >= iend);
- assert(args.ip[3] >= iend);
+ assert(args.ip[0] >= ilowest);
+ assert(args.ip[1] >= ilowest);
+ assert(args.ip[2] >= ilowest);
+ assert(args.ip[3] >= ilowest);
assert(args.op[3] <= oend);
- (void)iend;
+
+ assert(ilowest == args.ilowest);
+ assert(ilowest + 6 == args.iend[0]);
+ (void)ilowest;
/* finish bit streams one by one. */
- {
- size_t const segmentSize = (dstSize+3) / 4;
+ { size_t const segmentSize = (dstSize+3) / 4;
BYTE* segmentEnd = (BYTE*)dst;
int i;
for (i = 0; i < 4; ++i) {
@@ -712,97 +885,59 @@ HUF_decompress4X1_usingDTable_internal_bmi2_asm(
}
/* decoded size */
+ assert(dstSize != 0);
return dstSize;
}
-#endif /* ZSTD_ENABLE_ASM_X86_64_BMI2 */
-
-typedef size_t (*HUF_decompress_usingDTable_t)(void *dst, size_t dstSize,
- const void *cSrc,
- size_t cSrcSize,
- const HUF_DTable *DTable);
HUF_DGEN(HUF_decompress1X1_usingDTable_internal)
static size_t HUF_decompress4X1_usingDTable_internal(void* dst, size_t dstSize, void const* cSrc,
- size_t cSrcSize, HUF_DTable const* DTable, int bmi2)
+ size_t cSrcSize, HUF_DTable const* DTable, int flags)
{
+ HUF_DecompressUsingDTableFn fallbackFn = HUF_decompress4X1_usingDTable_internal_default;
+ HUF_DecompressFastLoopFn loopFn = HUF_decompress4X1_usingDTable_internal_fast_c_loop;
+
#if DYNAMIC_BMI2
- if (bmi2) {
+ if (flags & HUF_flags_bmi2) {
+ fallbackFn = HUF_decompress4X1_usingDTable_internal_bmi2;
# if ZSTD_ENABLE_ASM_X86_64_BMI2
- return HUF_decompress4X1_usingDTable_internal_bmi2_asm(dst, dstSize, cSrc, cSrcSize, DTable);
-# else
- return HUF_decompress4X1_usingDTable_internal_bmi2(dst, dstSize, cSrc, cSrcSize, DTable);
+ if (!(flags & HUF_flags_disableAsm)) {
+ loopFn = HUF_decompress4X1_usingDTable_internal_fast_asm_loop;
+ }
# endif
+ } else {
+ return fallbackFn(dst, dstSize, cSrc, cSrcSize, DTable);
}
-#else
- (void)bmi2;
#endif
#if ZSTD_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__)
- return HUF_decompress4X1_usingDTable_internal_bmi2_asm(dst, dstSize, cSrc, cSrcSize, DTable);
-#else
- return HUF_decompress4X1_usingDTable_internal_default(dst, dstSize, cSrc, cSrcSize, DTable);
+ if (!(flags & HUF_flags_disableAsm)) {
+ loopFn = HUF_decompress4X1_usingDTable_internal_fast_asm_loop;
+ }
#endif
-}
-
-
-size_t HUF_decompress1X1_usingDTable(
- void* dst, size_t dstSize,
- const void* cSrc, size_t cSrcSize,
- const HUF_DTable* DTable)
-{
- DTableDesc dtd = HUF_getDTableDesc(DTable);
- if (dtd.tableType != 0) return ERROR(GENERIC);
- return HUF_decompress1X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
-}
-size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
- const void* cSrc, size_t cSrcSize,
- void* workSpace, size_t wkspSize)
-{
- const BYTE* ip = (const BYTE*) cSrc;
-
- size_t const hSize = HUF_readDTableX1_wksp(DCtx, cSrc, cSrcSize, workSpace, wkspSize);
- if (HUF_isError(hSize)) return hSize;
- if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
- ip += hSize; cSrcSize -= hSize;
-
- return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0);
-}
-
-
-size_t HUF_decompress4X1_usingDTable(
- void* dst, size_t dstSize,
- const void* cSrc, size_t cSrcSize,
- const HUF_DTable* DTable)
-{
- DTableDesc dtd = HUF_getDTableDesc(DTable);
- if (dtd.tableType != 0) return ERROR(GENERIC);
- return HUF_decompress4X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+ if (HUF_ENABLE_FAST_DECODE && !(flags & HUF_flags_disableFast)) {
+ size_t const ret = HUF_decompress4X1_usingDTable_internal_fast(dst, dstSize, cSrc, cSrcSize, DTable, loopFn);
+ if (ret != 0)
+ return ret;
+ }
+ return fallbackFn(dst, dstSize, cSrc, cSrcSize, DTable);
}
-static size_t HUF_decompress4X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize,
+static size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
- void* workSpace, size_t wkspSize, int bmi2)
+ void* workSpace, size_t wkspSize, int flags)
{
const BYTE* ip = (const BYTE*) cSrc;
- size_t const hSize = HUF_readDTableX1_wksp_bmi2(dctx, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
+ size_t const hSize = HUF_readDTableX1_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize, flags);
if (HUF_isError(hSize)) return hSize;
if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
ip += hSize; cSrcSize -= hSize;
- return HUF_decompress4X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
-}
-
-size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
- const void* cSrc, size_t cSrcSize,
- void* workSpace, size_t wkspSize)
-{
- return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, 0);
+ return HUF_decompress4X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, flags);
}
-
#endif /* HUF_FORCE_DECOMPRESS_X2 */
@@ -985,7 +1120,7 @@ static void HUF_fillDTableX2Level2(HUF_DEltX2* DTable, U32 targetLog, const U32
static void HUF_fillDTableX2(HUF_DEltX2* DTable, const U32 targetLog,
const sortedSymbol_t* sortedList,
- const U32* rankStart, rankValCol_t *rankValOrigin, const U32 maxWeight,
+ const U32* rankStart, rankValCol_t* rankValOrigin, const U32 maxWeight,
const U32 nbBitsBaseline)
{
U32* const rankVal = rankValOrigin[0];
@@ -1040,14 +1175,7 @@ typedef struct {
size_t HUF_readDTableX2_wksp(HUF_DTable* DTable,
const void* src, size_t srcSize,
- void* workSpace, size_t wkspSize)
-{
- return HUF_readDTableX2_wksp_bmi2(DTable, src, srcSize, workSpace, wkspSize, /* bmi2 */ 0);
-}
-
-size_t HUF_readDTableX2_wksp_bmi2(HUF_DTable* DTable,
- const void* src, size_t srcSize,
- void* workSpace, size_t wkspSize, int bmi2)
+ void* workSpace, size_t wkspSize, int flags)
{
U32 tableLog, maxW, nbSymbols;
DTableDesc dtd = HUF_getDTableDesc(DTable);
@@ -1069,7 +1197,7 @@ size_t HUF_readDTableX2_wksp_bmi2(HUF_DTable* DTable,
if (maxTableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
/* ZSTD_memset(weightList, 0, sizeof(weightList)); */ /* is not necessary, even though some analyzer complain ... */
- iSize = HUF_readStats_wksp(wksp->weightList, HUF_SYMBOLVALUE_MAX + 1, wksp->rankStats, &nbSymbols, &tableLog, src, srcSize, wksp->calleeWksp, sizeof(wksp->calleeWksp), bmi2);
+ iSize = HUF_readStats_wksp(wksp->weightList, HUF_SYMBOLVALUE_MAX + 1, wksp->rankStats, &nbSymbols, &tableLog, src, srcSize, wksp->calleeWksp, sizeof(wksp->calleeWksp), flags);
if (HUF_isError(iSize)) return iSize;
/* check result */
@@ -1159,15 +1287,19 @@ HUF_decodeLastSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, c
}
#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
- ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
+ do { ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog); } while (0)
-#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
- if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
- ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
+#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
+ do { \
+ if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
+ ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog); \
+ } while (0)
-#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
- if (MEM_64bits()) \
- ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
+#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
+ do { \
+ if (MEM_64bits()) \
+ ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog); \
+ } while (0)
HINT_INLINE size_t
HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd,
@@ -1227,7 +1359,7 @@ HUF_decompress1X2_usingDTable_internal_body(
/* decode */
{ BYTE* const ostart = (BYTE*) dst;
- BYTE* const oend = ostart + dstSize;
+ BYTE* const oend = ZSTD_maybeNullPtrAdd(ostart, dstSize);
const void* const dtPtr = DTable+1; /* force compiler to not use strict-aliasing */
const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
DTableDesc const dtd = HUF_getDTableDesc(DTable);
@@ -1240,6 +1372,11 @@ HUF_decompress1X2_usingDTable_internal_body(
/* decoded size */
return dstSize;
}
+
+/* HUF_decompress4X2_usingDTable_internal_body():
+ * Conditions:
+ * @dstSize >= 6
+ */
FORCE_INLINE_TEMPLATE size_t
HUF_decompress4X2_usingDTable_internal_body(
void* dst, size_t dstSize,
@@ -1247,6 +1384,7 @@ HUF_decompress4X2_usingDTable_internal_body(
const HUF_DTable* DTable)
{
if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
+ if (dstSize < 6) return ERROR(corruption_detected); /* stream 4-split doesn't work */
{ const BYTE* const istart = (const BYTE*) cSrc;
BYTE* const ostart = (BYTE*) dst;
@@ -1280,8 +1418,9 @@ HUF_decompress4X2_usingDTable_internal_body(
DTableDesc const dtd = HUF_getDTableDesc(DTable);
U32 const dtLog = dtd.tableLog;
- if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
- if (opStart4 > oend) return ERROR(corruption_detected); /* overflow */
+ if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
+ if (opStart4 > oend) return ERROR(corruption_detected); /* overflow */
+ assert(dstSize >= 6 /* validated above */);
CHECK_F( BIT_initDStream(&bitD1, istart1, length1) );
CHECK_F( BIT_initDStream(&bitD2, istart2, length2) );
CHECK_F( BIT_initDStream(&bitD3, istart3, length3) );
@@ -1366,44 +1505,191 @@ size_t HUF_decompress4X2_usingDTable_internal_bmi2(void* dst, size_t dstSize, vo
}
#endif
-#if HUF_NEED_DEFAULT_FUNCTION
static
size_t HUF_decompress4X2_usingDTable_internal_default(void* dst, size_t dstSize, void const* cSrc,
size_t cSrcSize, HUF_DTable const* DTable) {
return HUF_decompress4X2_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable);
}
-#endif
#if ZSTD_ENABLE_ASM_X86_64_BMI2
-HUF_ASM_DECL void HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop(HUF_DecompressAsmArgs* args) ZSTDLIB_HIDDEN;
+HUF_ASM_DECL void HUF_decompress4X2_usingDTable_internal_fast_asm_loop(HUF_DecompressFastArgs* args) ZSTDLIB_HIDDEN;
+
+#endif
+
+static HUF_FAST_BMI2_ATTRS
+void HUF_decompress4X2_usingDTable_internal_fast_c_loop(HUF_DecompressFastArgs* args)
+{
+ U64 bits[4];
+ BYTE const* ip[4];
+ BYTE* op[4];
+ BYTE* oend[4];
+ HUF_DEltX2 const* const dtable = (HUF_DEltX2 const*)args->dt;
+ BYTE const* const ilowest = args->ilowest;
+
+ /* Copy the arguments to local registers. */
+ ZSTD_memcpy(&bits, &args->bits, sizeof(bits));
+ ZSTD_memcpy((void*)(&ip), &args->ip, sizeof(ip));
+ ZSTD_memcpy(&op, &args->op, sizeof(op));
+
+ oend[0] = op[1];
+ oend[1] = op[2];
+ oend[2] = op[3];
+ oend[3] = args->oend;
+
+ assert(MEM_isLittleEndian());
+ assert(!MEM_32bits());
+
+ for (;;) {
+ BYTE* olimit;
+ int stream;
+
+ /* Assert loop preconditions */
+#ifndef NDEBUG
+ for (stream = 0; stream < 4; ++stream) {
+ assert(op[stream] <= oend[stream]);
+ assert(ip[stream] >= ilowest);
+ }
+#endif
+ /* Compute olimit */
+ {
+ /* Each loop does 5 table lookups for each of the 4 streams.
+ * Each table lookup consumes up to 11 bits of input, and produces
+ * up to 2 bytes of output.
+ */
+ /* We can consume up to 7 bytes of input per iteration per stream.
+ * We also know that each input pointer is >= ip[0]. So we can run
+ * iters loops before running out of input.
+ */
+ size_t iters = (size_t)(ip[0] - ilowest) / 7;
+ /* Each iteration can produce up to 10 bytes of output per stream.
+ * Each output stream may advance at different rates. So take the
+ * minimum number of safe iterations among all the output streams.
+ */
+ for (stream = 0; stream < 4; ++stream) {
+ size_t const oiters = (size_t)(oend[stream] - op[stream]) / 10;
+ iters = MIN(iters, oiters);
+ }
+
+ /* Each iteration produces at least 5 output symbols. So until
+ * op[3] crosses olimit, we know we haven't executed iters
+ * iterations yet. This saves us maintaining an iters counter,
+ * at the expense of computing the remaining # of iterations
+ * more frequently.
+ */
+ olimit = op[3] + (iters * 5);
+
+ /* Exit the fast decoding loop once we reach the end. */
+ if (op[3] == olimit)
+ break;
+
+ /* Exit the decoding loop if any input pointer has crossed the
+ * previous one. This indicates corruption, and a precondition
+ * to our loop is that ip[i] >= ip[0].
+ */
+ for (stream = 1; stream < 4; ++stream) {
+ if (ip[stream] < ip[stream - 1])
+ goto _out;
+ }
+ }
+
+#ifndef NDEBUG
+ for (stream = 1; stream < 4; ++stream) {
+ assert(ip[stream] >= ip[stream - 1]);
+ }
+#endif
-static HUF_ASM_X86_64_BMI2_ATTRS size_t
-HUF_decompress4X2_usingDTable_internal_bmi2_asm(
+#define HUF_4X2_DECODE_SYMBOL(_stream, _decode3) \
+ do { \
+ if ((_decode3) || (_stream) != 3) { \
+ int const index = (int)(bits[(_stream)] >> 53); \
+ HUF_DEltX2 const entry = dtable[index]; \
+ MEM_write16(op[(_stream)], entry.sequence); \
+ bits[(_stream)] <<= (entry.nbBits) & 0x3F; \
+ op[(_stream)] += (entry.length); \
+ } \
+ } while (0)
+
+#define HUF_4X2_RELOAD_STREAM(_stream) \
+ do { \
+ HUF_4X2_DECODE_SYMBOL(3, 1); \
+ { \
+ int const ctz = ZSTD_countTrailingZeros64(bits[(_stream)]); \
+ int const nbBits = ctz & 7; \
+ int const nbBytes = ctz >> 3; \
+ ip[(_stream)] -= nbBytes; \
+ bits[(_stream)] = MEM_read64(ip[(_stream)]) | 1; \
+ bits[(_stream)] <<= nbBits; \
+ } \
+ } while (0)
+
+ /* Manually unroll the loop because compilers don't consistently
+ * unroll the inner loops, which destroys performance.
+ */
+ do {
+ /* Decode 5 symbols from each of the first 3 streams.
+ * The final stream will be decoded during the reload phase
+ * to reduce register pressure.
+ */
+ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X2_DECODE_SYMBOL, 0);
+ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X2_DECODE_SYMBOL, 0);
+ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X2_DECODE_SYMBOL, 0);
+ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X2_DECODE_SYMBOL, 0);
+ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X2_DECODE_SYMBOL, 0);
+
+ /* Decode one symbol from the final stream */
+ HUF_4X2_DECODE_SYMBOL(3, 1);
+
+ /* Decode 4 symbols from the final stream & reload bitstreams.
+ * The final stream is reloaded last, meaning that all 5 symbols
+ * are decoded from the final stream before it is reloaded.
+ */
+ HUF_4X_FOR_EACH_STREAM(HUF_4X2_RELOAD_STREAM);
+ } while (op[3] < olimit);
+ }
+
+#undef HUF_4X2_DECODE_SYMBOL
+#undef HUF_4X2_RELOAD_STREAM
+
+_out:
+
+ /* Save the final values of each of the state variables back to args. */
+ ZSTD_memcpy(&args->bits, &bits, sizeof(bits));
+ ZSTD_memcpy((void*)(&args->ip), &ip, sizeof(ip));
+ ZSTD_memcpy(&args->op, &op, sizeof(op));
+}
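The safe-iteration bound computed in the loop above can be made concrete with a small arithmetic sketch (the function name and sample values are illustrative only; the 7/10/5 constants come from the comments inside the loop):

    static size_t sketch_olimit_advance(size_t inputHeadroom, size_t minOutputHeadroom)
    {
        size_t const inIters  = inputHeadroom / 7;       /* each iteration consumes <= 7 input bytes per stream */
        size_t const outIters = minOutputHeadroom / 10;  /* and produces <= 10 output bytes per stream */
        size_t const iters    = inIters < outIters ? inIters : outIters;
        return iters * 5;                                /* op[3] advances >= 5 bytes per iteration */
    }
    /* e.g. sketch_olimit_advance(70, 45) == MIN(10, 4) * 5 == 20,
     * so olimit would sit 20 bytes past the current op[3]. */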
+
+
+static HUF_FAST_BMI2_ATTRS size_t
+HUF_decompress4X2_usingDTable_internal_fast(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
- const HUF_DTable* DTable) {
+ const HUF_DTable* DTable,
+ HUF_DecompressFastLoopFn loopFn) {
void const* dt = DTable + 1;
- const BYTE* const iend = (const BYTE*)cSrc + 6;
- BYTE* const oend = (BYTE*)dst + dstSize;
- HUF_DecompressAsmArgs args;
+ const BYTE* const ilowest = (const BYTE*)cSrc;
+ BYTE* const oend = ZSTD_maybeNullPtrAdd((BYTE*)dst, dstSize);
+ HUF_DecompressFastArgs args;
{
- size_t const ret = HUF_DecompressAsmArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable);
+ size_t const ret = HUF_DecompressFastArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable);
FORWARD_IF_ERROR(ret, "Failed to init asm args");
- if (ret != 0)
- return HUF_decompress4X2_usingDTable_internal_bmi2(dst, dstSize, cSrc, cSrcSize, DTable);
+ if (ret == 0)
+ return 0;
}
- assert(args.ip[0] >= args.ilimit);
- HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop(&args);
+ assert(args.ip[0] >= args.ilowest);
+ loopFn(&args);
/* note : op4 already verified within main loop */
- assert(args.ip[0] >= iend);
- assert(args.ip[1] >= iend);
- assert(args.ip[2] >= iend);
- assert(args.ip[3] >= iend);
+ assert(args.ip[0] >= ilowest);
+ assert(args.ip[1] >= ilowest);
+ assert(args.ip[2] >= ilowest);
+ assert(args.ip[3] >= ilowest);
assert(args.op[3] <= oend);
- (void)iend;
+
+ assert(ilowest == args.ilowest);
+ assert(ilowest + 6 == args.iend[0]);
+ (void)ilowest;
/* finish bitStreams one by one */
{
@@ -1426,91 +1712,72 @@ HUF_decompress4X2_usingDTable_internal_bmi2_asm(
/* decoded size */
return dstSize;
}
-#endif /* ZSTD_ENABLE_ASM_X86_64_BMI2 */
static size_t HUF_decompress4X2_usingDTable_internal(void* dst, size_t dstSize, void const* cSrc,
- size_t cSrcSize, HUF_DTable const* DTable, int bmi2)
+ size_t cSrcSize, HUF_DTable const* DTable, int flags)
{
+ HUF_DecompressUsingDTableFn fallbackFn = HUF_decompress4X2_usingDTable_internal_default;
+ HUF_DecompressFastLoopFn loopFn = HUF_decompress4X2_usingDTable_internal_fast_c_loop;
+
#if DYNAMIC_BMI2
- if (bmi2) {
+ if (flags & HUF_flags_bmi2) {
+ fallbackFn = HUF_decompress4X2_usingDTable_internal_bmi2;
# if ZSTD_ENABLE_ASM_X86_64_BMI2
- return HUF_decompress4X2_usingDTable_internal_bmi2_asm(dst, dstSize, cSrc, cSrcSize, DTable);
-# else
- return HUF_decompress4X2_usingDTable_internal_bmi2(dst, dstSize, cSrc, cSrcSize, DTable);
+ if (!(flags & HUF_flags_disableAsm)) {
+ loopFn = HUF_decompress4X2_usingDTable_internal_fast_asm_loop;
+ }
# endif
+ } else {
+ return fallbackFn(dst, dstSize, cSrc, cSrcSize, DTable);
}
-#else
- (void)bmi2;
#endif
#if ZSTD_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__)
- return HUF_decompress4X2_usingDTable_internal_bmi2_asm(dst, dstSize, cSrc, cSrcSize, DTable);
-#else
- return HUF_decompress4X2_usingDTable_internal_default(dst, dstSize, cSrc, cSrcSize, DTable);
+ if (!(flags & HUF_flags_disableAsm)) {
+ loopFn = HUF_decompress4X2_usingDTable_internal_fast_asm_loop;
+ }
#endif
+
+ if (HUF_ENABLE_FAST_DECODE && !(flags & HUF_flags_disableFast)) {
+ size_t const ret = HUF_decompress4X2_usingDTable_internal_fast(dst, dstSize, cSrc, cSrcSize, DTable, loopFn);
+ if (ret != 0)
+ return ret;
+ }
+ return fallbackFn(dst, dstSize, cSrc, cSrcSize, DTable);
}
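The dispatch above replaces the old int bmi2 argument with a flags bitset; a hedged sketch of how a caller might assemble it (the two predicate parameters and the wrapper name are hypothetical, the flag names are the ones tested above):

    static int sketch_huf_flags(int cpuHasBmi2, int forcePortableLoop)
    {
        int flags = 0;
        if (cpuHasBmi2)
            flags |= HUF_flags_bmi2;        /* selects the BMI2-compiled fallback path */
        if (forcePortableLoop)
            flags |= HUF_flags_disableAsm;  /* keeps the C fast loop instead of the asm loop */
        /* HUF_flags_disableFast would skip the fast path entirely and call fallbackFn directly */
        return flags;
    }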
HUF_DGEN(HUF_decompress1X2_usingDTable_internal)
-size_t HUF_decompress1X2_usingDTable(
- void* dst, size_t dstSize,
- const void* cSrc, size_t cSrcSize,
- const HUF_DTable* DTable)
-{
- DTableDesc dtd = HUF_getDTableDesc(DTable);
- if (dtd.tableType != 1) return ERROR(GENERIC);
- return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
-}
-
size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
- void* workSpace, size_t wkspSize)
+ void* workSpace, size_t wkspSize, int flags)
{
const BYTE* ip = (const BYTE*) cSrc;
size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize,
- workSpace, wkspSize);
+ workSpace, wkspSize, flags);
if (HUF_isError(hSize)) return hSize;
if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
ip += hSize; cSrcSize -= hSize;
- return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0);
+ return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, flags);
}
-
-size_t HUF_decompress4X2_usingDTable(
- void* dst, size_t dstSize,
- const void* cSrc, size_t cSrcSize,
- const HUF_DTable* DTable)
-{
- DTableDesc dtd = HUF_getDTableDesc(DTable);
- if (dtd.tableType != 1) return ERROR(GENERIC);
- return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
-}
-
-static size_t HUF_decompress4X2_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize,
+static size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
- void* workSpace, size_t wkspSize, int bmi2)
+ void* workSpace, size_t wkspSize, int flags)
{
const BYTE* ip = (const BYTE*) cSrc;
size_t hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize,
- workSpace, wkspSize);
+ workSpace, wkspSize, flags);
if (HUF_isError(hSize)) return hSize;
if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
ip += hSize; cSrcSize -= hSize;
- return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
+ return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, flags);
}
-size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
- const void* cSrc, size_t cSrcSize,
- void* workSpace, size_t wkspSize)
-{
- return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, /* bmi2 */ 0);
-}
-
-
#endif /* HUF_FORCE_DECOMPRESS_X1 */
@@ -1518,44 +1785,6 @@ size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
/* Universal decompression selectors */
/* ***********************************/
-size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize,
- const void* cSrc, size_t cSrcSize,
- const HUF_DTable* DTable)
-{
- DTableDesc const dtd = HUF_getDTableDesc(DTable);
-#if defined(HUF_FORCE_DECOMPRESS_X1)
- (void)dtd;
- assert(dtd.tableType == 0);
- return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
-#elif defined(HUF_FORCE_DECOMPRESS_X2)
- (void)dtd;
- assert(dtd.tableType == 1);
- return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
-#else
- return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) :
- HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
-#endif
-}
-
-size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize,
- const void* cSrc, size_t cSrcSize,
- const HUF_DTable* DTable)
-{
- DTableDesc const dtd = HUF_getDTableDesc(DTable);
-#if defined(HUF_FORCE_DECOMPRESS_X1)
- (void)dtd;
- assert(dtd.tableType == 0);
- return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
-#elif defined(HUF_FORCE_DECOMPRESS_X2)
- (void)dtd;
- assert(dtd.tableType == 1);
- return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
-#else
- return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) :
- HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
-#endif
-}
-
#if !defined(HUF_FORCE_DECOMPRESS_X1) && !defined(HUF_FORCE_DECOMPRESS_X2)
typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
@@ -1610,36 +1839,9 @@ U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize)
#endif
}
-
-size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst,
- size_t dstSize, const void* cSrc,
- size_t cSrcSize, void* workSpace,
- size_t wkspSize)
-{
- /* validation checks */
- if (dstSize == 0) return ERROR(dstSize_tooSmall);
- if (cSrcSize == 0) return ERROR(corruption_detected);
-
- { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
-#if defined(HUF_FORCE_DECOMPRESS_X1)
- (void)algoNb;
- assert(algoNb == 0);
- return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
-#elif defined(HUF_FORCE_DECOMPRESS_X2)
- (void)algoNb;
- assert(algoNb == 1);
- return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
-#else
- return algoNb ? HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
- cSrcSize, workSpace, wkspSize):
- HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
-#endif
- }
-}
-
size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
- void* workSpace, size_t wkspSize)
+ void* workSpace, size_t wkspSize, int flags)
{
/* validation checks */
if (dstSize == 0) return ERROR(dstSize_tooSmall);
@@ -1652,71 +1854,71 @@ size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
(void)algoNb;
assert(algoNb == 0);
return HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc,
- cSrcSize, workSpace, wkspSize);
+ cSrcSize, workSpace, wkspSize, flags);
#elif defined(HUF_FORCE_DECOMPRESS_X2)
(void)algoNb;
assert(algoNb == 1);
return HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
- cSrcSize, workSpace, wkspSize);
+ cSrcSize, workSpace, wkspSize, flags);
#else
return algoNb ? HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
- cSrcSize, workSpace, wkspSize):
+ cSrcSize, workSpace, wkspSize, flags):
HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc,
- cSrcSize, workSpace, wkspSize);
+ cSrcSize, workSpace, wkspSize, flags);
#endif
}
}
-size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)
+size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int flags)
{
DTableDesc const dtd = HUF_getDTableDesc(DTable);
#if defined(HUF_FORCE_DECOMPRESS_X1)
(void)dtd;
assert(dtd.tableType == 0);
- return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+ return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags);
#elif defined(HUF_FORCE_DECOMPRESS_X2)
(void)dtd;
assert(dtd.tableType == 1);
- return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+ return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags);
#else
- return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
- HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+ return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags) :
+ HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags);
#endif
}
#ifndef HUF_FORCE_DECOMPRESS_X2
-size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)
+size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags)
{
const BYTE* ip = (const BYTE*) cSrc;
- size_t const hSize = HUF_readDTableX1_wksp_bmi2(dctx, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
+ size_t const hSize = HUF_readDTableX1_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize, flags);
if (HUF_isError(hSize)) return hSize;
if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
ip += hSize; cSrcSize -= hSize;
- return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
+ return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, flags);
}
#endif
-size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)
+size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int flags)
{
DTableDesc const dtd = HUF_getDTableDesc(DTable);
#if defined(HUF_FORCE_DECOMPRESS_X1)
(void)dtd;
assert(dtd.tableType == 0);
- return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+ return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags);
#elif defined(HUF_FORCE_DECOMPRESS_X2)
(void)dtd;
assert(dtd.tableType == 1);
- return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+ return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags);
#else
- return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
- HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+ return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags) :
+ HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags);
#endif
}
-size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)
+size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags)
{
/* validation checks */
if (dstSize == 0) return ERROR(dstSize_tooSmall);
@@ -1726,15 +1928,14 @@ size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t ds
#if defined(HUF_FORCE_DECOMPRESS_X1)
(void)algoNb;
assert(algoNb == 0);
- return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
+ return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, flags);
#elif defined(HUF_FORCE_DECOMPRESS_X2)
(void)algoNb;
assert(algoNb == 1);
- return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
+ return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, flags);
#else
- return algoNb ? HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2) :
- HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
+ return algoNb ? HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, flags) :
+ HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, flags);
#endif
}
}
-
diff --git a/lib/zstd/decompress/zstd_ddict.c b/lib/zstd/decompress/zstd_ddict.c
index dbbc7919de53..30ef65e1ab5c 100644
--- a/lib/zstd/decompress/zstd_ddict.c
+++ b/lib/zstd/decompress/zstd_ddict.c
@@ -1,5 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -14,12 +15,12 @@
/*-*******************************************************
* Dependencies
*********************************************************/
+#include "../common/allocations.h" /* ZSTD_customMalloc, ZSTD_customFree */
#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */
#include "../common/cpu.h" /* bmi2 */
#include "../common/mem.h" /* low level memory routines */
#define FSE_STATIC_LINKING_ONLY
#include "../common/fse.h"
-#define HUF_STATIC_LINKING_ONLY
#include "../common/huf.h"
#include "zstd_decompress_internal.h"
#include "zstd_ddict.h"
@@ -131,7 +132,7 @@ static size_t ZSTD_initDDict_internal(ZSTD_DDict* ddict,
ZSTD_memcpy(internalBuffer, dict, dictSize);
}
ddict->dictSize = dictSize;
- ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */
+ ddict->entropy.hufTable[0] = (HUF_DTable)((ZSTD_HUFFDTABLE_CAPACITY_LOG)*0x1000001); /* cover both little and big endian */
/* parse dictionary content */
FORWARD_IF_ERROR( ZSTD_loadEntropy_intoDDict(ddict, dictContentType) , "");
@@ -237,5 +238,5 @@ size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict)
unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict)
{
if (ddict==NULL) return 0;
- return ZSTD_getDictID_fromDict(ddict->dictContent, ddict->dictSize);
+ return ddict->dictID;
}
diff --git a/lib/zstd/decompress/zstd_ddict.h b/lib/zstd/decompress/zstd_ddict.h
index 8c1a79d666f8..de459a0dacd1 100644
--- a/lib/zstd/decompress/zstd_ddict.h
+++ b/lib/zstd/decompress/zstd_ddict.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
diff --git a/lib/zstd/decompress/zstd_decompress.c b/lib/zstd/decompress/zstd_decompress.c
index 6b3177c94711..bb009554e3a6 100644
--- a/lib/zstd/decompress/zstd_decompress.c
+++ b/lib/zstd/decompress/zstd_decompress.c
@@ -1,5 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -53,13 +54,15 @@
* Dependencies
*********************************************************/
#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */
+#include "../common/allocations.h" /* ZSTD_customMalloc, ZSTD_customCalloc, ZSTD_customFree */
+#include "../common/error_private.h"
+#include "../common/zstd_internal.h" /* blockProperties_t */
#include "../common/mem.h" /* low level memory routines */
+#include "../common/bits.h" /* ZSTD_highbit32 */
#define FSE_STATIC_LINKING_ONLY
#include "../common/fse.h"
-#define HUF_STATIC_LINKING_ONLY
#include "../common/huf.h"
#include <linux/xxhash.h> /* xxh64_reset, xxh64_update, xxh64_digest, XXH64 */
-#include "../common/zstd_internal.h" /* blockProperties_t */
#include "zstd_decompress_internal.h" /* ZSTD_DCtx */
#include "zstd_ddict.h" /* ZSTD_DDictDictContent */
#include "zstd_decompress_block.h" /* ZSTD_decompressBlock_internal */
@@ -72,11 +75,11 @@
*************************************/
#define DDICT_HASHSET_MAX_LOAD_FACTOR_COUNT_MULT 4
-#define DDICT_HASHSET_MAX_LOAD_FACTOR_SIZE_MULT 3 /* These two constants represent SIZE_MULT/COUNT_MULT load factor without using a float.
- * Currently, that means a 0.75 load factor.
- * So, if count * COUNT_MULT / size * SIZE_MULT != 0, then we've exceeded
- * the load factor of the ddict hash set.
- */
+#define DDICT_HASHSET_MAX_LOAD_FACTOR_SIZE_MULT 3 /* These two constants represent SIZE_MULT/COUNT_MULT load factor without using a float.
+ * Currently, that means a 0.75 load factor.
+ * So, if count * COUNT_MULT / size * SIZE_MULT != 0, then we've exceeded
+ * the load factor of the ddict hash set.
+ */
#define DDICT_HASHSET_TABLE_BASE_SIZE 64
#define DDICT_HASHSET_RESIZE_FACTOR 2
@@ -237,6 +240,8 @@ static void ZSTD_DCtx_resetParameters(ZSTD_DCtx* dctx)
dctx->outBufferMode = ZSTD_bm_buffered;
dctx->forceIgnoreChecksum = ZSTD_d_validateChecksum;
dctx->refMultipleDDicts = ZSTD_rmd_refSingleDDict;
+ dctx->disableHufAsm = 0;
+ dctx->maxBlockSizeParam = 0;
}
static void ZSTD_initDCtx_internal(ZSTD_DCtx* dctx)
@@ -253,6 +258,7 @@ static void ZSTD_initDCtx_internal(ZSTD_DCtx* dctx)
dctx->streamStage = zdss_init;
dctx->noForwardProgress = 0;
dctx->oversizedDuration = 0;
+ dctx->isFrameDecompression = 1;
#if DYNAMIC_BMI2
dctx->bmi2 = ZSTD_cpuSupportsBmi2();
#endif
@@ -421,16 +427,40 @@ size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize)
* note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless
* @return : 0, `zfhPtr` is correctly filled,
* >0, `srcSize` is too small, value is wanted `srcSize` amount,
- * or an error code, which can be tested using ZSTD_isError() */
-size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format)
+** or an error code, which can be tested using ZSTD_isError() */
+size_t ZSTD_getFrameHeader_advanced(ZSTD_FrameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format)
{
const BYTE* ip = (const BYTE*)src;
size_t const minInputSize = ZSTD_startingInputLength(format);
- ZSTD_memset(zfhPtr, 0, sizeof(*zfhPtr)); /* not strictly necessary, but static analyzer do not understand that zfhPtr is only going to be read only if return value is zero, since they are 2 different signals */
- if (srcSize < minInputSize) return minInputSize;
- RETURN_ERROR_IF(src==NULL, GENERIC, "invalid parameter");
+ DEBUGLOG(5, "ZSTD_getFrameHeader_advanced: minInputSize = %zu, srcSize = %zu", minInputSize, srcSize);
+
+ if (srcSize > 0) {
+ /* note : technically could be considered an assert(), since it's an invalid entry */
+ RETURN_ERROR_IF(src==NULL, GENERIC, "invalid parameter : src==NULL, but srcSize>0");
+ }
+ if (srcSize < minInputSize) {
+ if (srcSize > 0 && format != ZSTD_f_zstd1_magicless) {
+ /* when receiving less than @minInputSize bytes,
+ * check that these bytes at least correspond to a supported magic number
+ * in order to error out early if they don't.
+ **/
+ size_t const toCopy = MIN(4, srcSize);
+ unsigned char hbuf[4]; MEM_writeLE32(hbuf, ZSTD_MAGICNUMBER);
+ assert(src != NULL);
+ ZSTD_memcpy(hbuf, src, toCopy);
+ if ( MEM_readLE32(hbuf) != ZSTD_MAGICNUMBER ) {
+ /* not a zstd frame : let's check if it's a skippable frame */
+ MEM_writeLE32(hbuf, ZSTD_MAGIC_SKIPPABLE_START);
+ ZSTD_memcpy(hbuf, src, toCopy);
+ if ((MEM_readLE32(hbuf) & ZSTD_MAGIC_SKIPPABLE_MASK) != ZSTD_MAGIC_SKIPPABLE_START) {
+ RETURN_ERROR(prefix_unknown,
+ "first bytes don't correspond to any supported magic number");
+ } } }
+ return minInputSize;
+ }
+ ZSTD_memset(zfhPtr, 0, sizeof(*zfhPtr)); /* not strictly necessary, but static analyzers may not understand that zfhPtr will be read only if return value is zero, since they are 2 different signals */
if ( (format != ZSTD_f_zstd1_magicless)
&& (MEM_readLE32(src) != ZSTD_MAGICNUMBER) ) {
if ((MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
@@ -438,8 +468,10 @@ size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, s
if (srcSize < ZSTD_SKIPPABLEHEADERSIZE)
return ZSTD_SKIPPABLEHEADERSIZE; /* magic number + frame length */
ZSTD_memset(zfhPtr, 0, sizeof(*zfhPtr));
- zfhPtr->frameContentSize = MEM_readLE32((const char *)src + ZSTD_FRAMEIDSIZE);
zfhPtr->frameType = ZSTD_skippableFrame;
+ zfhPtr->dictID = MEM_readLE32(src) - ZSTD_MAGIC_SKIPPABLE_START;
+ zfhPtr->headerSize = ZSTD_SKIPPABLEHEADERSIZE;
+ zfhPtr->frameContentSize = MEM_readLE32((const char *)src + ZSTD_FRAMEIDSIZE);
return 0;
}
RETURN_ERROR(prefix_unknown, "");
@@ -508,7 +540,7 @@ size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, s
* @return : 0, `zfhPtr` is correctly filled,
* >0, `srcSize` is too small, value is wanted `srcSize` amount,
* or an error code, which can be tested using ZSTD_isError() */
-size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize)
+size_t ZSTD_getFrameHeader(ZSTD_FrameHeader* zfhPtr, const void* src, size_t srcSize)
{
return ZSTD_getFrameHeader_advanced(zfhPtr, src, srcSize, ZSTD_f_zstd1);
}
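With the early probe added above, even a partial header now fails fast on a bad magic number; a small sketch of that behaviour (the byte values are just the little-endian prefix of ZSTD_MAGICNUMBER, the function name is hypothetical):

    static size_t sketch_probe_partial_header(void)
    {
        unsigned char partial[2] = { 0x28, 0xB5 };  /* first two LE bytes of ZSTD_MAGICNUMBER (0xFD2FB528) */
        ZSTD_FrameHeader zfh;
        /* the prefix matches a supported magic number, so this returns the wanted srcSize
         * (minInputSize) and leaves zfh untouched; two bytes matching neither the zstd nor
         * the skippable magic family would instead return an error code (prefix_unknown) */
        return ZSTD_getFrameHeader(&zfh, partial, sizeof(partial));
    }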
@@ -520,7 +552,7 @@ size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t src
* - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) */
unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize)
{
- { ZSTD_frameHeader zfh;
+ { ZSTD_FrameHeader zfh;
if (ZSTD_getFrameHeader(&zfh, src, srcSize) != 0)
return ZSTD_CONTENTSIZE_ERROR;
if (zfh.frameType == ZSTD_skippableFrame) {
@@ -540,49 +572,52 @@ static size_t readSkippableFrameSize(void const* src, size_t srcSize)
sizeU32 = MEM_readLE32((BYTE const*)src + ZSTD_FRAMEIDSIZE);
RETURN_ERROR_IF((U32)(sizeU32 + ZSTD_SKIPPABLEHEADERSIZE) < sizeU32,
frameParameter_unsupported, "");
- {
- size_t const skippableSize = skippableHeaderSize + sizeU32;
+ { size_t const skippableSize = skippableHeaderSize + sizeU32;
RETURN_ERROR_IF(skippableSize > srcSize, srcSize_wrong, "");
return skippableSize;
}
}
/*! ZSTD_readSkippableFrame() :
- * Retrieves a zstd skippable frame containing data given by src, and writes it to dst buffer.
+ * Retrieves content of a skippable frame, and writes it to dst buffer.
*
* The parameter magicVariant will receive the magicVariant that was supplied when the frame was written,
* i.e. magicNumber - ZSTD_MAGIC_SKIPPABLE_START. This can be NULL if the caller is not interested
* in the magicVariant.
*
- * Returns an error if destination buffer is not large enough, or if the frame is not skippable.
+ * Returns an error if destination buffer is not large enough, or if this is not a valid skippable frame.
*
* @return : number of bytes written or a ZSTD error.
*/
-ZSTDLIB_API size_t ZSTD_readSkippableFrame(void* dst, size_t dstCapacity, unsigned* magicVariant,
- const void* src, size_t srcSize)
+size_t ZSTD_readSkippableFrame(void* dst, size_t dstCapacity,
+ unsigned* magicVariant, /* optional, can be NULL */
+ const void* src, size_t srcSize)
{
- U32 const magicNumber = MEM_readLE32(src);
- size_t skippableFrameSize = readSkippableFrameSize(src, srcSize);
- size_t skippableContentSize = skippableFrameSize - ZSTD_SKIPPABLEHEADERSIZE;
-
- /* check input validity */
- RETURN_ERROR_IF(!ZSTD_isSkippableFrame(src, srcSize), frameParameter_unsupported, "");
- RETURN_ERROR_IF(skippableFrameSize < ZSTD_SKIPPABLEHEADERSIZE || skippableFrameSize > srcSize, srcSize_wrong, "");
- RETURN_ERROR_IF(skippableContentSize > dstCapacity, dstSize_tooSmall, "");
+ RETURN_ERROR_IF(srcSize < ZSTD_SKIPPABLEHEADERSIZE, srcSize_wrong, "");
- /* deliver payload */
- if (skippableContentSize > 0 && dst != NULL)
- ZSTD_memcpy(dst, (const BYTE *)src + ZSTD_SKIPPABLEHEADERSIZE, skippableContentSize);
- if (magicVariant != NULL)
- *magicVariant = magicNumber - ZSTD_MAGIC_SKIPPABLE_START;
- return skippableContentSize;
+ { U32 const magicNumber = MEM_readLE32(src);
+ size_t skippableFrameSize = readSkippableFrameSize(src, srcSize);
+ size_t skippableContentSize = skippableFrameSize - ZSTD_SKIPPABLEHEADERSIZE;
+
+ /* check input validity */
+ RETURN_ERROR_IF(!ZSTD_isSkippableFrame(src, srcSize), frameParameter_unsupported, "");
+ RETURN_ERROR_IF(skippableFrameSize < ZSTD_SKIPPABLEHEADERSIZE || skippableFrameSize > srcSize, srcSize_wrong, "");
+ RETURN_ERROR_IF(skippableContentSize > dstCapacity, dstSize_tooSmall, "");
+
+ /* deliver payload */
+ if (skippableContentSize > 0 && dst != NULL)
+ ZSTD_memcpy(dst, (const BYTE *)src + ZSTD_SKIPPABLEHEADERSIZE, skippableContentSize);
+ if (magicVariant != NULL)
+ *magicVariant = magicNumber - ZSTD_MAGIC_SKIPPABLE_START;
+ return skippableContentSize;
+ }
}
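A hedged usage sketch for the rewritten function above (the buffer size and function name are illustrative):

    static size_t sketch_read_skippable(const void* frame, size_t frameSize)
    {
        unsigned char payload[256];
        unsigned magicVariant;  /* receives magicNumber - ZSTD_MAGIC_SKIPPABLE_START */
        size_t const contentSize = ZSTD_readSkippableFrame(payload, sizeof(payload),
                                                           &magicVariant, frame, frameSize);
        /* contentSize is the payload length on success, or a ZSTD error code:
         * srcSize_wrong for a truncated header, frameParameter_unsupported if the
         * input is not a skippable frame, dstSize_tooSmall if payload[] is too small */
        return contentSize;
    }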
/* ZSTD_findDecompressedSize() :
- * compatible with legacy mode
* `srcSize` must be the exact length of some number of ZSTD compressed and/or
* skippable frames
- * @return : decompressed size of the frames contained */
+ * note: compatible with legacy mode
+ * @return : decompressed size of the frames contained */
unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize)
{
unsigned long long totalDstSize = 0;
@@ -592,9 +627,7 @@ unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize)
if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
size_t const skippableSize = readSkippableFrameSize(src, srcSize);
- if (ZSTD_isError(skippableSize)) {
- return ZSTD_CONTENTSIZE_ERROR;
- }
+ if (ZSTD_isError(skippableSize)) return ZSTD_CONTENTSIZE_ERROR;
assert(skippableSize <= srcSize);
src = (const BYTE *)src + skippableSize;
@@ -602,17 +635,17 @@ unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize)
continue;
}
- { unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize);
- if (ret >= ZSTD_CONTENTSIZE_ERROR) return ret;
+ { unsigned long long const fcs = ZSTD_getFrameContentSize(src, srcSize);
+ if (fcs >= ZSTD_CONTENTSIZE_ERROR) return fcs;
- /* check for overflow */
- if (totalDstSize + ret < totalDstSize) return ZSTD_CONTENTSIZE_ERROR;
- totalDstSize += ret;
+ if (totalDstSize + fcs < totalDstSize)
+ return ZSTD_CONTENTSIZE_ERROR; /* check for overflow */
+ totalDstSize += fcs;
}
+ /* skip to next frame */
{ size_t const frameSrcSize = ZSTD_findFrameCompressedSize(src, srcSize);
- if (ZSTD_isError(frameSrcSize)) {
- return ZSTD_CONTENTSIZE_ERROR;
- }
+ if (ZSTD_isError(frameSrcSize)) return ZSTD_CONTENTSIZE_ERROR;
+ assert(frameSrcSize <= srcSize);
src = (const BYTE *)src + frameSrcSize;
srcSize -= frameSrcSize;
@@ -676,13 +709,13 @@ static ZSTD_frameSizeInfo ZSTD_errorFrameSizeInfo(size_t ret)
return frameSizeInfo;
}
-static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(const void* src, size_t srcSize)
+static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(const void* src, size_t srcSize, ZSTD_format_e format)
{
ZSTD_frameSizeInfo frameSizeInfo;
ZSTD_memset(&frameSizeInfo, 0, sizeof(ZSTD_frameSizeInfo));
- if ((srcSize >= ZSTD_SKIPPABLEHEADERSIZE)
+ if (format == ZSTD_f_zstd1 && (srcSize >= ZSTD_SKIPPABLEHEADERSIZE)
&& (MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
frameSizeInfo.compressedSize = readSkippableFrameSize(src, srcSize);
assert(ZSTD_isError(frameSizeInfo.compressedSize) ||
@@ -693,10 +726,10 @@ static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(const void* src, size_t srcSize
const BYTE* const ipstart = ip;
size_t remainingSize = srcSize;
size_t nbBlocks = 0;
- ZSTD_frameHeader zfh;
+ ZSTD_FrameHeader zfh;
/* Extract Frame Header */
- { size_t const ret = ZSTD_getFrameHeader(&zfh, src, srcSize);
+ { size_t const ret = ZSTD_getFrameHeader_advanced(&zfh, src, srcSize, format);
if (ZSTD_isError(ret))
return ZSTD_errorFrameSizeInfo(ret);
if (ret > 0)
@@ -730,28 +763,31 @@ static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(const void* src, size_t srcSize
ip += 4;
}
+ frameSizeInfo.nbBlocks = nbBlocks;
frameSizeInfo.compressedSize = (size_t)(ip - ipstart);
frameSizeInfo.decompressedBound = (zfh.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN)
? zfh.frameContentSize
- : nbBlocks * zfh.blockSizeMax;
+ : (unsigned long long)nbBlocks * zfh.blockSizeMax;
return frameSizeInfo;
}
}
+static size_t ZSTD_findFrameCompressedSize_advanced(const void *src, size_t srcSize, ZSTD_format_e format) {
+ ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize, format);
+ return frameSizeInfo.compressedSize;
+}
+
/* ZSTD_findFrameCompressedSize() :
- * compatible with legacy mode
- * `src` must point to the start of a ZSTD frame, ZSTD legacy frame, or skippable frame
- * `srcSize` must be at least as large as the frame contained
- * @return : the compressed size of the frame starting at `src` */
+ * See docs in zstd.h
+ * Note: compatible with legacy mode */
size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)
{
- ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize);
- return frameSizeInfo.compressedSize;
+ return ZSTD_findFrameCompressedSize_advanced(src, srcSize, ZSTD_f_zstd1);
}
/* ZSTD_decompressBound() :
* compatible with legacy mode
- * `src` must point to the start of a ZSTD frame or a skippeable frame
+ * `src` must point to the start of a ZSTD frame or a skippable frame
* `srcSize` must be at least as large as the frame contained
* @return : the maximum decompressed size of the compressed source
*/
@@ -760,7 +796,7 @@ unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize)
unsigned long long bound = 0;
/* Iterate over each frame */
while (srcSize > 0) {
- ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize);
+ ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize, ZSTD_f_zstd1);
size_t const compressedSize = frameSizeInfo.compressedSize;
unsigned long long const decompressedBound = frameSizeInfo.decompressedBound;
if (ZSTD_isError(compressedSize) || decompressedBound == ZSTD_CONTENTSIZE_ERROR)
@@ -773,6 +809,48 @@ unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize)
return bound;
}
+size_t ZSTD_decompressionMargin(void const* src, size_t srcSize)
+{
+ size_t margin = 0;
+ unsigned maxBlockSize = 0;
+
+ /* Iterate over each frame */
+ while (srcSize > 0) {
+ ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize, ZSTD_f_zstd1);
+ size_t const compressedSize = frameSizeInfo.compressedSize;
+ unsigned long long const decompressedBound = frameSizeInfo.decompressedBound;
+ ZSTD_FrameHeader zfh;
+
+ FORWARD_IF_ERROR(ZSTD_getFrameHeader(&zfh, src, srcSize), "");
+ if (ZSTD_isError(compressedSize) || decompressedBound == ZSTD_CONTENTSIZE_ERROR)
+ return ERROR(corruption_detected);
+
+ if (zfh.frameType == ZSTD_frame) {
+ /* Add the frame header to our margin */
+ margin += zfh.headerSize;
+ /* Add the checksum to our margin */
+ margin += zfh.checksumFlag ? 4 : 0;
+ /* Add 3 bytes per block */
+ margin += 3 * frameSizeInfo.nbBlocks;
+
+ /* Compute the max block size */
+ maxBlockSize = MAX(maxBlockSize, zfh.blockSizeMax);
+ } else {
+ assert(zfh.frameType == ZSTD_skippableFrame);
+ /* Add the entire skippable frame size to our margin. */
+ margin += compressedSize;
+ }
+
+ assert(srcSize >= compressedSize);
+ src = (const BYTE*)src + compressedSize;
+ srcSize -= compressedSize;
+ }
+
+ /* Add the max block size back to the margin. */
+ margin += maxBlockSize;
+
+ return margin;
+}
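A worked example of the margin computed above, with hypothetical numbers for a single ordinary frame:

    /*  frame header       :      6 bytes   (zfh.headerSize)
     *  checksum           :      4 bytes   (zfh.checksumFlag set)
     *  block headers      :      9 bytes   (3 blocks * 3 bytes)
     *  max block size     : 131072 bytes   (128 KB)
     *  margin             = 6 + 4 + 9 + 131072 = 131091 bytes
     * i.e. an output buffer of decompressedSize + 131091 bytes would be expected to
     * suffice for in-place decompression with the compressed data placed at its end. */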
/*-*************************************************************
* Frame decoding
@@ -815,7 +893,7 @@ static size_t ZSTD_setRleBlock(void* dst, size_t dstCapacity,
return regenSize;
}
-static void ZSTD_DCtx_trace_end(ZSTD_DCtx const* dctx, U64 uncompressedSize, U64 compressedSize, unsigned streaming)
+static void ZSTD_DCtx_trace_end(ZSTD_DCtx const* dctx, U64 uncompressedSize, U64 compressedSize, int streaming)
{
(void)dctx;
(void)uncompressedSize;
@@ -856,6 +934,10 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
ip += frameHeaderSize; remainingSrcSize -= frameHeaderSize;
}
+ /* Shrink the blockSizeMax if enabled */
+ if (dctx->maxBlockSizeParam != 0)
+ dctx->fParams.blockSizeMax = MIN(dctx->fParams.blockSizeMax, (unsigned)dctx->maxBlockSizeParam);
+
/* Loop on each block */
while (1) {
BYTE* oBlockEnd = oend;
@@ -888,7 +970,8 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
switch(blockProperties.blockType)
{
case bt_compressed:
- decodedSize = ZSTD_decompressBlock_internal(dctx, op, (size_t)(oBlockEnd-op), ip, cBlockSize, /* frame */ 1, not_streaming);
+ assert(dctx->isFrameDecompression == 1);
+ decodedSize = ZSTD_decompressBlock_internal(dctx, op, (size_t)(oBlockEnd-op), ip, cBlockSize, not_streaming);
break;
case bt_raw :
/* Use oend instead of oBlockEnd because this function is safe to overlap. It uses memmove. */
@@ -901,12 +984,14 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
default:
RETURN_ERROR(corruption_detected, "invalid block type");
}
-
- if (ZSTD_isError(decodedSize)) return decodedSize;
- if (dctx->validateChecksum)
+ FORWARD_IF_ERROR(decodedSize, "Block decompression failure");
+ DEBUGLOG(5, "Decompressed block of dSize = %u", (unsigned)decodedSize);
+ if (dctx->validateChecksum) {
xxh64_update(&dctx->xxhState, op, decodedSize);
- if (decodedSize != 0)
+ }
+ if (decodedSize) /* support dst = NULL,0 */ {
op += decodedSize;
+ }
assert(ip != NULL);
ip += cBlockSize;
remainingSrcSize -= cBlockSize;
@@ -930,12 +1015,15 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
}
ZSTD_DCtx_trace_end(dctx, (U64)(op-ostart), (U64)(ip-istart), /* streaming */ 0);
/* Allow caller to get size read */
+ DEBUGLOG(4, "ZSTD_decompressFrame: decompressed frame of size %i, consuming %i bytes of input", (int)(op-ostart), (int)(ip - (const BYTE*)*srcPtr));
*srcPtr = ip;
*srcSizePtr = remainingSrcSize;
return (size_t)(op-ostart);
}
-static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
+static
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const void* dict, size_t dictSize,
@@ -955,17 +1043,18 @@ static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
while (srcSize >= ZSTD_startingInputLength(dctx->format)) {
- { U32 const magicNumber = MEM_readLE32(src);
- DEBUGLOG(4, "reading magic number %08X (expecting %08X)",
- (unsigned)magicNumber, ZSTD_MAGICNUMBER);
+ if (dctx->format == ZSTD_f_zstd1 && srcSize >= 4) {
+ U32 const magicNumber = MEM_readLE32(src);
+ DEBUGLOG(5, "reading magic number %08X", (unsigned)magicNumber);
if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
+ /* skippable frame detected : skip it */
size_t const skippableSize = readSkippableFrameSize(src, srcSize);
- FORWARD_IF_ERROR(skippableSize, "readSkippableFrameSize failed");
+ FORWARD_IF_ERROR(skippableSize, "invalid skippable frame");
assert(skippableSize <= srcSize);
src = (const BYTE *)src + skippableSize;
srcSize -= skippableSize;
- continue;
+ continue; /* check next frame */
} }
if (ddict) {
@@ -1061,8 +1150,8 @@ size_t ZSTD_decompress(void* dst, size_t dstCapacity, const void* src, size_t sr
size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx) { return dctx->expected; }
/*
- * Similar to ZSTD_nextSrcSizeToDecompress(), but when a block input can be streamed,
- * we allow taking a partial block as the input. Currently only raw uncompressed blocks can
+ * Similar to ZSTD_nextSrcSizeToDecompress(), but when a block input can be streamed, we
+ * allow taking a partial block as the input. Currently only raw uncompressed blocks can
* be streamed.
*
* For blocks that can be streamed, this allows us to reduce the latency until we produce
@@ -1181,7 +1270,8 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
{
case bt_compressed:
DEBUGLOG(5, "ZSTD_decompressContinue: case bt_compressed");
- rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 1, is_streaming);
+ assert(dctx->isFrameDecompression == 1);
+ rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, is_streaming);
dctx->expected = 0; /* Streaming not supported */
break;
case bt_raw :
@@ -1250,6 +1340,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
case ZSTDds_decodeSkippableHeader:
assert(src != NULL);
assert(srcSize <= ZSTD_SKIPPABLEHEADERSIZE);
+ assert(dctx->format != ZSTD_f_zstd1_magicless);
ZSTD_memcpy(dctx->headerBuffer + (ZSTD_SKIPPABLEHEADERSIZE - srcSize), src, srcSize); /* complete skippable header */
dctx->expected = MEM_readLE32(dctx->headerBuffer + ZSTD_FRAMEIDSIZE); /* note : dctx->expected can grow seriously large, beyond local buffer size */
dctx->stage = ZSTDds_skipFrame;
@@ -1262,7 +1353,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
default:
assert(0); /* impossible */
- RETURN_ERROR(GENERIC, "impossible to reach"); /* some compiler require default to do something */
+ RETURN_ERROR(GENERIC, "impossible to reach"); /* some compilers require default to do something */
}
}
@@ -1303,11 +1394,11 @@ ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
/* in minimal huffman, we always use X1 variants */
size_t const hSize = HUF_readDTableX1_wksp(entropy->hufTable,
dictPtr, dictEnd - dictPtr,
- workspace, workspaceSize);
+ workspace, workspaceSize, /* flags */ 0);
#else
size_t const hSize = HUF_readDTableX2_wksp(entropy->hufTable,
dictPtr, (size_t)(dictEnd - dictPtr),
- workspace, workspaceSize);
+ workspace, workspaceSize, /* flags */ 0);
#endif
RETURN_ERROR_IF(HUF_isError(hSize), dictionary_corrupted, "");
dictPtr += hSize;
@@ -1403,10 +1494,11 @@ size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx)
dctx->prefixStart = NULL;
dctx->virtualStart = NULL;
dctx->dictEnd = NULL;
- dctx->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */
+ dctx->entropy.hufTable[0] = (HUF_DTable)((ZSTD_HUFFDTABLE_CAPACITY_LOG)*0x1000001); /* cover both little and big endian */
dctx->litEntropy = dctx->fseEntropy = 0;
dctx->dictID = 0;
dctx->bType = bt_reserved;
+ dctx->isFrameDecompression = 1;
ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue));
ZSTD_memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue)); /* initial repcodes */
dctx->LLTptr = dctx->entropy.LLTable;
@@ -1465,7 +1557,7 @@ unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize)
 * This could be for one of the following reasons :
* - The frame does not require a dictionary (most common case).
* - The frame was built with dictID intentionally removed.
- * Needed dictionary is a hidden information.
+ * Needed dictionary is a hidden piece of information.
* Note : this use case also happens when using a non-conformant dictionary.
* - `srcSize` is too small, and as a result, frame header could not be decoded.
* Note : possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`.
@@ -1474,7 +1566,7 @@ unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize)
* ZSTD_getFrameHeader(), which will provide a more precise error code. */
unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize)
{
- ZSTD_frameHeader zfp = { 0, 0, 0, ZSTD_frame, 0, 0, 0 };
+ ZSTD_FrameHeader zfp = { 0, 0, 0, ZSTD_frame, 0, 0, 0, 0, 0 };
size_t const hError = ZSTD_getFrameHeader(&zfp, src, srcSize);
if (ZSTD_isError(hError)) return 0;
return zfp.dictID;
@@ -1581,7 +1673,9 @@ size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t di
size_t ZSTD_initDStream(ZSTD_DStream* zds)
{
DEBUGLOG(4, "ZSTD_initDStream");
- return ZSTD_initDStream_usingDDict(zds, NULL);
+ FORWARD_IF_ERROR(ZSTD_DCtx_reset(zds, ZSTD_reset_session_only), "");
+ FORWARD_IF_ERROR(ZSTD_DCtx_refDDict(zds, NULL), "");
+ return ZSTD_startingInputLength(zds->format);
}
/* ZSTD_initDStream_usingDDict() :
@@ -1589,6 +1683,7 @@ size_t ZSTD_initDStream(ZSTD_DStream* zds)
* this function cannot fail */
size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict)
{
+ DEBUGLOG(4, "ZSTD_initDStream_usingDDict");
FORWARD_IF_ERROR( ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only) , "");
FORWARD_IF_ERROR( ZSTD_DCtx_refDDict(dctx, ddict) , "");
return ZSTD_startingInputLength(dctx->format);
@@ -1599,6 +1694,7 @@ size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict)
* this function cannot fail */
size_t ZSTD_resetDStream(ZSTD_DStream* dctx)
{
+ DEBUGLOG(4, "ZSTD_resetDStream");
FORWARD_IF_ERROR(ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only), "");
return ZSTD_startingInputLength(dctx->format);
}
@@ -1670,6 +1766,15 @@ ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam)
bounds.lowerBound = (int)ZSTD_rmd_refSingleDDict;
bounds.upperBound = (int)ZSTD_rmd_refMultipleDDicts;
return bounds;
+ case ZSTD_d_disableHuffmanAssembly:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 1;
+ return bounds;
+ case ZSTD_d_maxBlockSize:
+ bounds.lowerBound = ZSTD_BLOCKSIZE_MAX_MIN;
+ bounds.upperBound = ZSTD_BLOCKSIZE_MAX;
+ return bounds;
+
default:;
}
bounds.error = ERROR(parameter_unsupported);
@@ -1710,6 +1815,12 @@ size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int* value
case ZSTD_d_refMultipleDDicts:
*value = (int)dctx->refMultipleDDicts;
return 0;
+ case ZSTD_d_disableHuffmanAssembly:
+ *value = (int)dctx->disableHufAsm;
+ return 0;
+ case ZSTD_d_maxBlockSize:
+ *value = dctx->maxBlockSizeParam;
+ return 0;
default:;
}
RETURN_ERROR(parameter_unsupported, "");
@@ -1743,6 +1854,14 @@ size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter dParam, int value
}
dctx->refMultipleDDicts = (ZSTD_refMultipleDDicts_e)value;
return 0;
+ case ZSTD_d_disableHuffmanAssembly:
+ CHECK_DBOUNDS(ZSTD_d_disableHuffmanAssembly, value);
+ dctx->disableHufAsm = value != 0;
+ return 0;
+ case ZSTD_d_maxBlockSize:
+ if (value != 0) CHECK_DBOUNDS(ZSTD_d_maxBlockSize, value);
+ dctx->maxBlockSizeParam = value;
+ return 0;
default:;
}
RETURN_ERROR(parameter_unsupported, "");
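A hedged usage sketch for the two new parameters handled above (the wrapper name is hypothetical; 64 KB is just an example value within the documented bounds):

    static size_t sketch_set_new_dparams(ZSTD_DCtx* dctx)
    {
        /* prefer the portable Huffman decode loops over the BMI2 assembly */
        FORWARD_IF_ERROR(ZSTD_DCtx_setParameter(dctx, ZSTD_d_disableHuffmanAssembly, 1), "");
        /* shrink the per-frame blockSizeMax used for buffer sizing to 64 KB;
         * passing 0 keeps the default behaviour */
        FORWARD_IF_ERROR(ZSTD_DCtx_setParameter(dctx, ZSTD_d_maxBlockSize, 64 * 1024), "");
        return 0;
    }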
@@ -1754,6 +1873,7 @@ size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset)
|| (reset == ZSTD_reset_session_and_parameters) ) {
dctx->streamStage = zdss_init;
dctx->noForwardProgress = 0;
+ dctx->isFrameDecompression = 1;
}
if ( (reset == ZSTD_reset_parameters)
|| (reset == ZSTD_reset_session_and_parameters) ) {
@@ -1770,11 +1890,17 @@ size_t ZSTD_sizeof_DStream(const ZSTD_DStream* dctx)
return ZSTD_sizeof_DCtx(dctx);
}
-size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize)
+static size_t ZSTD_decodingBufferSize_internal(unsigned long long windowSize, unsigned long long frameContentSize, size_t blockSizeMax)
{
- size_t const blockSize = (size_t) MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
- /* space is needed to store the litbuffer after the output of a given block without stomping the extDict of a previous run, as well as to cover both windows against wildcopy*/
- unsigned long long const neededRBSize = windowSize + blockSize + ZSTD_BLOCKSIZE_MAX + (WILDCOPY_OVERLENGTH * 2);
+ size_t const blockSize = MIN((size_t)MIN(windowSize, ZSTD_BLOCKSIZE_MAX), blockSizeMax);
+ /* We need blockSize + WILDCOPY_OVERLENGTH worth of buffer so that if a block
+ * ends at windowSize + WILDCOPY_OVERLENGTH + 1 bytes, we can start writing
+ * the block at the beginning of the output buffer, and maintain a full window.
+ *
+ * We need another blockSize worth of buffer so that we can store split
+ * literals at the end of the block without overwriting the extDict window.
+ */
+ unsigned long long const neededRBSize = windowSize + (blockSize * 2) + (WILDCOPY_OVERLENGTH * 2);
unsigned long long const neededSize = MIN(frameContentSize, neededRBSize);
size_t const minRBSize = (size_t) neededSize;
RETURN_ERROR_IF((unsigned long long)minRBSize != neededSize,
@@ -1782,6 +1908,11 @@ size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long
return minRBSize;
}
+size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize)
+{
+ return ZSTD_decodingBufferSize_internal(windowSize, frameContentSize, ZSTD_BLOCKSIZE_MAX);
+}
+
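An illustrative sizing for the formula above (values hypothetical; WILDCOPY_OVERLENGTH is assumed here to be 32 bytes):

    /*  windowSize   = 1048576 (1 MB), frame blockSizeMax = 131072 (128 KB)
     *  blockSize    = MIN(MIN(1048576, ZSTD_BLOCKSIZE_MAX), 131072) = 131072
     *  neededRBSize = 1048576 + 2*131072 + 2*32 = 1310784 bytes
     * which is then capped by frameContentSize when the frame declares one. */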
size_t ZSTD_estimateDStreamSize(size_t windowSize)
{
size_t const blockSize = MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
@@ -1793,7 +1924,7 @@ size_t ZSTD_estimateDStreamSize(size_t windowSize)
size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize)
{
U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX; /* note : should be user-selectable, but requires an additional parameter (or a dctx) */
- ZSTD_frameHeader zfh;
+ ZSTD_FrameHeader zfh;
size_t const err = ZSTD_getFrameHeader(&zfh, src, srcSize);
if (ZSTD_isError(err)) return err;
RETURN_ERROR_IF(err>0, srcSize_wrong, "");
@@ -1888,6 +2019,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
U32 someMoreWork = 1;
DEBUGLOG(5, "ZSTD_decompressStream");
+ assert(zds != NULL);
RETURN_ERROR_IF(
input->pos > input->size,
srcSize_wrong,
@@ -1918,7 +2050,6 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
if (zds->refMultipleDDicts && zds->ddictSet) {
ZSTD_DCtx_selectFrameDDict(zds);
}
- DEBUGLOG(5, "header size : %u", (U32)hSize);
if (ZSTD_isError(hSize)) {
return hSize; /* error */
}
@@ -1932,6 +2063,11 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
zds->lhSize += remainingInput;
}
input->pos = input->size;
+ /* check first few bytes */
+ FORWARD_IF_ERROR(
+ ZSTD_getFrameHeader_advanced(&zds->fParams, zds->headerBuffer, zds->lhSize, zds->format),
+ "First few bytes detected incorrect" );
+ /* return hint input size */
return (MAX((size_t)ZSTD_FRAMEHEADERSIZE_MIN(zds->format), hSize) - zds->lhSize) + ZSTD_blockHeaderSize; /* remaining header bytes + next block header */
}
assert(ip != NULL);
@@ -1943,14 +2079,15 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
if (zds->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN
&& zds->fParams.frameType != ZSTD_skippableFrame
&& (U64)(size_t)(oend-op) >= zds->fParams.frameContentSize) {
- size_t const cSize = ZSTD_findFrameCompressedSize(istart, (size_t)(iend-istart));
+ size_t const cSize = ZSTD_findFrameCompressedSize_advanced(istart, (size_t)(iend-istart), zds->format);
if (cSize <= (size_t)(iend-istart)) {
/* shortcut : using single-pass mode */
size_t const decompressedSize = ZSTD_decompress_usingDDict(zds, op, (size_t)(oend-op), istart, cSize, ZSTD_getDDict(zds));
if (ZSTD_isError(decompressedSize)) return decompressedSize;
- DEBUGLOG(4, "shortcut to single-pass ZSTD_decompress_usingDDict()")
+ DEBUGLOG(4, "shortcut to single-pass ZSTD_decompress_usingDDict()");
+ assert(istart != NULL);
ip = istart + cSize;
- op += decompressedSize;
+ op = op ? op + decompressedSize : op; /* can occur if frameContentSize = 0 (empty frame) */
zds->expected = 0;
zds->streamStage = zdss_init;
someMoreWork = 0;
@@ -1969,7 +2106,8 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
DEBUGLOG(4, "Consume header");
FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(zds, ZSTD_getDDict(zds)), "");
- if ((MEM_readLE32(zds->headerBuffer) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */
+ if (zds->format == ZSTD_f_zstd1
+ && (MEM_readLE32(zds->headerBuffer) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */
zds->expected = MEM_readLE32(zds->headerBuffer + ZSTD_FRAMEIDSIZE);
zds->stage = ZSTDds_skipFrame;
} else {
@@ -1985,11 +2123,13 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN);
RETURN_ERROR_IF(zds->fParams.windowSize > zds->maxWindowSize,
frameParameter_windowTooLarge, "");
+ if (zds->maxBlockSizeParam != 0)
+ zds->fParams.blockSizeMax = MIN(zds->fParams.blockSizeMax, (unsigned)zds->maxBlockSizeParam);
/* Adapt buffer sizes to frame header instructions */
{ size_t const neededInBuffSize = MAX(zds->fParams.blockSizeMax, 4 /* frame checksum */);
size_t const neededOutBuffSize = zds->outBufferMode == ZSTD_bm_buffered
- ? ZSTD_decodingBufferSize_min(zds->fParams.windowSize, zds->fParams.frameContentSize)
+ ? ZSTD_decodingBufferSize_internal(zds->fParams.windowSize, zds->fParams.frameContentSize, zds->fParams.blockSizeMax)
: 0;
ZSTD_DCtx_updateOversizedDuration(zds, neededInBuffSize, neededOutBuffSize);
@@ -2034,6 +2174,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
}
if ((size_t)(iend-ip) >= neededInSize) { /* decode directly from src */
FORWARD_IF_ERROR(ZSTD_decompressContinueStream(zds, &op, oend, ip, neededInSize), "");
+ assert(ip != NULL);
ip += neededInSize;
/* Function modifies the stage so we must break */
break;
@@ -2048,7 +2189,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
int const isSkipFrame = ZSTD_isSkipFrame(zds);
size_t loadedSize;
/* At this point we shouldn't be decompressing a block that we can stream. */
- assert(neededInSize == ZSTD_nextSrcSizeToDecompressWithInputSize(zds, iend - ip));
+ assert(neededInSize == ZSTD_nextSrcSizeToDecompressWithInputSize(zds, (size_t)(iend - ip)));
if (isSkipFrame) {
loadedSize = MIN(toLoad, (size_t)(iend-ip));
} else {
@@ -2057,8 +2198,11 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
"should never happen");
loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, (size_t)(iend-ip));
}
- ip += loadedSize;
- zds->inPos += loadedSize;
+ if (loadedSize != 0) {
+ /* ip may be NULL */
+ ip += loadedSize;
+ zds->inPos += loadedSize;
+ }
if (loadedSize < toLoad) { someMoreWork = 0; break; } /* not enough input, wait for more */
/* decode loaded input */
@@ -2068,14 +2212,17 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
break;
}
case zdss_flush:
- { size_t const toFlushSize = zds->outEnd - zds->outStart;
+ {
+ size_t const toFlushSize = zds->outEnd - zds->outStart;
size_t const flushedSize = ZSTD_limitCopy(op, (size_t)(oend-op), zds->outBuff + zds->outStart, toFlushSize);
- op += flushedSize;
+
+ op = op ? op + flushedSize : op;
+
zds->outStart += flushedSize;
if (flushedSize == toFlushSize) { /* flush completed */
zds->streamStage = zdss_read;
if ( (zds->outBuffSize < zds->fParams.frameContentSize)
- && (zds->outStart + zds->fParams.blockSizeMax > zds->outBuffSize) ) {
+ && (zds->outStart + zds->fParams.blockSizeMax > zds->outBuffSize) ) {
DEBUGLOG(5, "restart filling outBuff from beginning (left:%i, needed:%u)",
(int)(zds->outBuffSize - zds->outStart),
(U32)zds->fParams.blockSizeMax);
@@ -2089,7 +2236,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
default:
assert(0); /* impossible */
- RETURN_ERROR(GENERIC, "impossible to reach"); /* some compiler require default to do something */
+ RETURN_ERROR(GENERIC, "impossible to reach"); /* some compilers require default to do something */
} }
/* result */
@@ -2102,8 +2249,8 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
if ((ip==istart) && (op==ostart)) { /* no forward progress */
zds->noForwardProgress ++;
if (zds->noForwardProgress >= ZSTD_NO_FORWARD_PROGRESS_MAX) {
- RETURN_ERROR_IF(op==oend, dstSize_tooSmall, "");
- RETURN_ERROR_IF(ip==iend, srcSize_wrong, "");
+ RETURN_ERROR_IF(op==oend, noForwardProgress_destFull, "");
+ RETURN_ERROR_IF(ip==iend, noForwardProgress_inputEmpty, "");
assert(0);
}
} else {
@@ -2140,11 +2287,17 @@ size_t ZSTD_decompressStream_simpleArgs (
void* dst, size_t dstCapacity, size_t* dstPos,
const void* src, size_t srcSize, size_t* srcPos)
{
- ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
- ZSTD_inBuffer input = { src, srcSize, *srcPos };
- /* ZSTD_compress_generic() will check validity of dstPos and srcPos */
- size_t const cErr = ZSTD_decompressStream(dctx, &output, &input);
- *dstPos = output.pos;
- *srcPos = input.pos;
- return cErr;
+ ZSTD_outBuffer output;
+ ZSTD_inBuffer input;
+ output.dst = dst;
+ output.size = dstCapacity;
+ output.pos = *dstPos;
+ input.src = src;
+ input.size = srcSize;
+ input.pos = *srcPos;
+ { size_t const cErr = ZSTD_decompressStream(dctx, &output, &input);
+ *dstPos = output.pos;
+ *srcPos = input.pos;
+ return cErr;
+ }
}
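The rewrite above only replaces the aggregate initializers with plain field assignments; behaviour is unchanged. For reference, a hedged usage sketch of the _simpleArgs variant (declared under ZSTD_STATIC_LINKING_ONLY in userspace zstd.h), which tracks progress through the two position cursors instead of ZSTD_inBuffer/ZSTD_outBuffer structs:

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    /* Drive the wrapper defined above with explicit position cursors.
     * dctx is a ZSTD_DCtx* (equivalently a ZSTD_DStream*) prepared by the caller. */
    static size_t drain_with_simple_args(ZSTD_DCtx* dctx,
                                         void* dst, size_t dstCapacity,
                                         const void* src, size_t srcSize)
    {
        size_t dstPos = 0, srcPos = 0;
        while (srcPos < srcSize) {
            size_t const ret = ZSTD_decompressStream_simpleArgs(dctx, dst, dstCapacity, &dstPos,
                                                                src, srcSize, &srcPos);
            if (ZSTD_isError(ret)) return ret;
            if (ret == 0) break;                 /* frame finished */
            if (dstPos == dstCapacity) break;    /* out of output room; caller must act */
        }
        return dstPos;                           /* bytes produced so far */
    }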
diff --git a/lib/zstd/decompress/zstd_decompress_block.c b/lib/zstd/decompress/zstd_decompress_block.c
index c1913b8e7c89..710eb0ffd5a3 100644
--- a/lib/zstd/decompress/zstd_decompress_block.c
+++ b/lib/zstd/decompress/zstd_decompress_block.c
@@ -1,5 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -20,12 +21,12 @@
#include "../common/mem.h" /* low level memory routines */
#define FSE_STATIC_LINKING_ONLY
#include "../common/fse.h"
-#define HUF_STATIC_LINKING_ONLY
#include "../common/huf.h"
#include "../common/zstd_internal.h"
#include "zstd_decompress_internal.h" /* ZSTD_DCtx */
#include "zstd_ddict.h" /* ZSTD_DDictDictContent */
#include "zstd_decompress_block.h"
+#include "../common/bits.h" /* ZSTD_highbit32 */
/*_*******************************************************
* Macros
@@ -51,6 +52,13 @@ static void ZSTD_copy4(void* dst, const void* src) { ZSTD_memcpy(dst, src, 4); }
* Block decoding
***************************************************************/
+static size_t ZSTD_blockSizeMax(ZSTD_DCtx const* dctx)
+{
+ size_t const blockSizeMax = dctx->isFrameDecompression ? dctx->fParams.blockSizeMax : ZSTD_BLOCKSIZE_MAX;
+ assert(blockSizeMax <= ZSTD_BLOCKSIZE_MAX);
+ return blockSizeMax;
+}
+
/*! ZSTD_getcBlockSize() :
* Provides the size of compressed block from block header `src` */
size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
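For context (a self-contained sketch, independent of zstd internals): ZSTD_getcBlockSize() parses the 3-byte block header defined by the format (RFC 8878):

    #include <stdint.h>
    #include <stddef.h>

    typedef struct { int lastBlock; int blockType; size_t blockSize; } BlockHeader;

    /* Block_Header is 3 bytes, little-endian:
     *   bit 0     : Last_Block flag
     *   bits 1-2  : Block_Type (0=Raw, 1=RLE, 2=Compressed, 3=Reserved)
     *   bits 3-23 : Block_Size (capped at 128 KB by the format) */
    static BlockHeader decode_block_header(const uint8_t hdr[3])
    {
        uint32_t const raw = (uint32_t)hdr[0] | ((uint32_t)hdr[1] << 8) | ((uint32_t)hdr[2] << 16);
        BlockHeader bh;
        bh.lastBlock = (int)(raw & 1);
        bh.blockType = (int)((raw >> 1) & 3);
        bh.blockSize = (size_t)(raw >> 3);
        return bh;
    }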
@@ -73,41 +81,49 @@ size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
static void ZSTD_allocateLiteralsBuffer(ZSTD_DCtx* dctx, void* const dst, const size_t dstCapacity, const size_t litSize,
const streaming_operation streaming, const size_t expectedWriteSize, const unsigned splitImmediately)
{
- if (streaming == not_streaming && dstCapacity > ZSTD_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH + litSize + WILDCOPY_OVERLENGTH)
- {
- /* room for litbuffer to fit without read faulting */
- dctx->litBuffer = (BYTE*)dst + ZSTD_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH;
+ size_t const blockSizeMax = ZSTD_blockSizeMax(dctx);
+ assert(litSize <= blockSizeMax);
+ assert(dctx->isFrameDecompression || streaming == not_streaming);
+ assert(expectedWriteSize <= blockSizeMax);
+ if (streaming == not_streaming && dstCapacity > blockSizeMax + WILDCOPY_OVERLENGTH + litSize + WILDCOPY_OVERLENGTH) {
+ /* If we aren't streaming, we can just put the literals after the output
+ * of the current block. We don't need to worry about overwriting the
+ * extDict of our window, because it doesn't exist.
+ * So if we have space after the end of the block, just put it there.
+ */
+ dctx->litBuffer = (BYTE*)dst + blockSizeMax + WILDCOPY_OVERLENGTH;
dctx->litBufferEnd = dctx->litBuffer + litSize;
dctx->litBufferLocation = ZSTD_in_dst;
- }
- else if (litSize > ZSTD_LITBUFFEREXTRASIZE)
- {
- /* won't fit in litExtraBuffer, so it will be split between end of dst and extra buffer */
+ } else if (litSize <= ZSTD_LITBUFFEREXTRASIZE) {
+ /* Literals fit entirely within the extra buffer, put them there to avoid
+ * having to split the literals.
+ */
+ dctx->litBuffer = dctx->litExtraBuffer;
+ dctx->litBufferEnd = dctx->litBuffer + litSize;
+ dctx->litBufferLocation = ZSTD_not_in_dst;
+ } else {
+ assert(blockSizeMax > ZSTD_LITBUFFEREXTRASIZE);
+ /* Literals must be split between the output block and the extra lit
+ * buffer. We fill the extra lit buffer with the tail of the literals,
+ * and put the rest of the literals at the end of the block, with
+ * WILDCOPY_OVERLENGTH of buffer room to allow for overreads.
+ * This MUST not write more than our maxBlockSize beyond dst, because in
+ * streaming mode, that could overwrite part of our extDict window.
+ */
if (splitImmediately) {
/* won't fit in litExtraBuffer, so it will be split between end of dst and extra buffer */
dctx->litBuffer = (BYTE*)dst + expectedWriteSize - litSize + ZSTD_LITBUFFEREXTRASIZE - WILDCOPY_OVERLENGTH;
dctx->litBufferEnd = dctx->litBuffer + litSize - ZSTD_LITBUFFEREXTRASIZE;
- }
- else {
- /* initially this will be stored entirely in dst during huffman decoding, it will partially shifted to litExtraBuffer after */
+ } else {
+ /* initially this will be stored entirely in dst during huffman decoding, it will partially be shifted to litExtraBuffer after */
dctx->litBuffer = (BYTE*)dst + expectedWriteSize - litSize;
dctx->litBufferEnd = (BYTE*)dst + expectedWriteSize;
}
dctx->litBufferLocation = ZSTD_split;
- }
- else
- {
- /* fits entirely within litExtraBuffer, so no split is necessary */
- dctx->litBuffer = dctx->litExtraBuffer;
- dctx->litBufferEnd = dctx->litBuffer + litSize;
- dctx->litBufferLocation = ZSTD_not_in_dst;
+ assert(dctx->litBufferEnd <= (BYTE*)dst + expectedWriteSize);
}
}
-/* Hidden declaration for fullbench */
-size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
- const void* src, size_t srcSize,
- void* dst, size_t dstCapacity, const streaming_operation streaming);
/*! ZSTD_decodeLiteralsBlock() :
* Where it is possible to do so without being stomped by the output during decompression, the literals block will be stored
* in the dstBuffer. If there is room to do so, it will be stored in full in the excess dst space after where the current
@@ -116,7 +132,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
*
* @return : nb of bytes read from src (< srcSize )
* note : symbol not declared but exposed for fullbench */
-size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
+static size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
const void* src, size_t srcSize, /* note : srcSize < BLOCKSIZE */
void* dst, size_t dstCapacity, const streaming_operation streaming)
{
@@ -124,7 +140,8 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
RETURN_ERROR_IF(srcSize < MIN_CBLOCK_SIZE, corruption_detected, "");
{ const BYTE* const istart = (const BYTE*) src;
- symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3);
+ SymbolEncodingType_e const litEncType = (SymbolEncodingType_e)(istart[0] & 3);
+ size_t const blockSizeMax = ZSTD_blockSizeMax(dctx);
switch(litEncType)
{
@@ -134,13 +151,16 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
ZSTD_FALLTHROUGH;
case set_compressed:
- RETURN_ERROR_IF(srcSize < 5, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3");
+ RETURN_ERROR_IF(srcSize < 5, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 2; here we need up to 5 for case 3");
{ size_t lhSize, litSize, litCSize;
U32 singleStream=0;
U32 const lhlCode = (istart[0] >> 2) & 3;
U32 const lhc = MEM_readLE32(istart);
size_t hufSuccess;
- size_t expectedWriteSize = MIN(ZSTD_BLOCKSIZE_MAX, dstCapacity);
+ size_t expectedWriteSize = MIN(blockSizeMax, dstCapacity);
+ int const flags = 0
+ | (ZSTD_DCtx_get_bmi2(dctx) ? HUF_flags_bmi2 : 0)
+ | (dctx->disableHufAsm ? HUF_flags_disableAsm : 0);
switch(lhlCode)
{
case 0: case 1: default: /* note : default is impossible, since lhlCode into [0..3] */
@@ -164,7 +184,11 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
break;
}
RETURN_ERROR_IF(litSize > 0 && dst == NULL, dstSize_tooSmall, "NULL not handled");
- RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected, "");
+ RETURN_ERROR_IF(litSize > blockSizeMax, corruption_detected, "");
+ if (!singleStream)
+ RETURN_ERROR_IF(litSize < MIN_LITERALS_FOR_4_STREAMS, literals_headerWrong,
+ "Not enough literals (%zu) for the 4-streams mode (min %u)",
+ litSize, MIN_LITERALS_FOR_4_STREAMS);
RETURN_ERROR_IF(litCSize + lhSize > srcSize, corruption_detected, "");
RETURN_ERROR_IF(expectedWriteSize < litSize , dstSize_tooSmall, "");
ZSTD_allocateLiteralsBuffer(dctx, dst, dstCapacity, litSize, streaming, expectedWriteSize, 0);
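The literals-buffer placement chosen by ZSTD_allocateLiteralsBuffer() a few hunks above boils down to a three-way decision. A sketch stripped of the dctx plumbing; blockMax, extraBuf and overlength stand in for blockSizeMax, ZSTD_LITBUFFEREXTRASIZE and WILDCOPY_OVERLENGTH (their values are not restated here):

    #include <stddef.h>

    enum lit_location { LIT_IN_DST, LIT_IN_EXTRA_BUF, LIT_SPLIT };

    /* 1) not streaming and dst is roomy  -> park literals after the block in dst
     * 2) literals fit the small side buf -> use the side buffer, no split
     * 3) otherwise                       -> split: tail in side buf, rest at end of dst */
    static enum lit_location choose_lit_buffer(int streaming, size_t dstCapacity,
                                               size_t litSize, size_t blockMax,
                                               size_t extraBuf, size_t overlength)
    {
        if (!streaming && dstCapacity > blockMax + overlength + litSize + overlength)
            return LIT_IN_DST;
        if (litSize <= extraBuf)
            return LIT_IN_EXTRA_BUF;
        return LIT_SPLIT;
    }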
@@ -176,13 +200,14 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
if (litEncType==set_repeat) {
if (singleStream) {
- hufSuccess = HUF_decompress1X_usingDTable_bmi2(
+ hufSuccess = HUF_decompress1X_usingDTable(
dctx->litBuffer, litSize, istart+lhSize, litCSize,
- dctx->HUFptr, ZSTD_DCtx_get_bmi2(dctx));
+ dctx->HUFptr, flags);
} else {
- hufSuccess = HUF_decompress4X_usingDTable_bmi2(
+ assert(litSize >= MIN_LITERALS_FOR_4_STREAMS);
+ hufSuccess = HUF_decompress4X_usingDTable(
dctx->litBuffer, litSize, istart+lhSize, litCSize,
- dctx->HUFptr, ZSTD_DCtx_get_bmi2(dctx));
+ dctx->HUFptr, flags);
}
} else {
if (singleStream) {
@@ -190,26 +215,28 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
hufSuccess = HUF_decompress1X_DCtx_wksp(
dctx->entropy.hufTable, dctx->litBuffer, litSize,
istart+lhSize, litCSize, dctx->workspace,
- sizeof(dctx->workspace));
+ sizeof(dctx->workspace), flags);
#else
- hufSuccess = HUF_decompress1X1_DCtx_wksp_bmi2(
+ hufSuccess = HUF_decompress1X1_DCtx_wksp(
dctx->entropy.hufTable, dctx->litBuffer, litSize,
istart+lhSize, litCSize, dctx->workspace,
- sizeof(dctx->workspace), ZSTD_DCtx_get_bmi2(dctx));
+ sizeof(dctx->workspace), flags);
#endif
} else {
- hufSuccess = HUF_decompress4X_hufOnly_wksp_bmi2(
+ hufSuccess = HUF_decompress4X_hufOnly_wksp(
dctx->entropy.hufTable, dctx->litBuffer, litSize,
istart+lhSize, litCSize, dctx->workspace,
- sizeof(dctx->workspace), ZSTD_DCtx_get_bmi2(dctx));
+ sizeof(dctx->workspace), flags);
}
}
if (dctx->litBufferLocation == ZSTD_split)
{
+ assert(litSize > ZSTD_LITBUFFEREXTRASIZE);
ZSTD_memcpy(dctx->litExtraBuffer, dctx->litBufferEnd - ZSTD_LITBUFFEREXTRASIZE, ZSTD_LITBUFFEREXTRASIZE);
ZSTD_memmove(dctx->litBuffer + ZSTD_LITBUFFEREXTRASIZE - WILDCOPY_OVERLENGTH, dctx->litBuffer, litSize - ZSTD_LITBUFFEREXTRASIZE);
dctx->litBuffer += ZSTD_LITBUFFEREXTRASIZE - WILDCOPY_OVERLENGTH;
dctx->litBufferEnd -= WILDCOPY_OVERLENGTH;
+ assert(dctx->litBufferEnd <= (BYTE*)dst + blockSizeMax);
}
RETURN_ERROR_IF(HUF_isError(hufSuccess), corruption_detected, "");
@@ -224,7 +251,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
case set_basic:
{ size_t litSize, lhSize;
U32 const lhlCode = ((istart[0]) >> 2) & 3;
- size_t expectedWriteSize = MIN(ZSTD_BLOCKSIZE_MAX, dstCapacity);
+ size_t expectedWriteSize = MIN(blockSizeMax, dstCapacity);
switch(lhlCode)
{
case 0: case 2: default: /* note : default is impossible, since lhlCode into [0..3] */
@@ -237,11 +264,13 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
break;
case 3:
lhSize = 3;
+ RETURN_ERROR_IF(srcSize<3, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 2; here we need lhSize = 3");
litSize = MEM_readLE24(istart) >> 4;
break;
}
RETURN_ERROR_IF(litSize > 0 && dst == NULL, dstSize_tooSmall, "NULL not handled");
+ RETURN_ERROR_IF(litSize > blockSizeMax, corruption_detected, "");
RETURN_ERROR_IF(expectedWriteSize < litSize, dstSize_tooSmall, "");
ZSTD_allocateLiteralsBuffer(dctx, dst, dstCapacity, litSize, streaming, expectedWriteSize, 1);
if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */
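The lhlCode switches in this and the neighbouring hunks all decode the Literals_Section_Header. A standalone sketch of the Raw/RLE size formats (per RFC 8878, matching the shifts used above):

    #include <stdint.h>
    #include <stddef.h>

    /* First byte: bits 0-1 = Literals_Block_Type, bits 2-3 = Size_Format.
     * For Raw/RLE literals:
     *   Size_Format 0 or 2 -> 1-byte header, Regenerated_Size = byte0 >> 3  (5 bits)
     *   Size_Format 1      -> 2-byte header, Regenerated_Size = LE16 >> 4   (12 bits)
     *   Size_Format 3      -> 3-byte header, Regenerated_Size = LE24 >> 4   (20 bits) */
    static size_t raw_literals_header(const uint8_t* in, size_t* lhSize)
    {
        unsigned const sizeFormat = (in[0] >> 2) & 3;
        switch (sizeFormat) {
        case 0: case 2: default:
            *lhSize = 1;
            return in[0] >> 3;
        case 1:
            *lhSize = 2;
            return ((size_t)in[0] | ((size_t)in[1] << 8)) >> 4;
        case 3:
            *lhSize = 3;
            return ((size_t)in[0] | ((size_t)in[1] << 8) | ((size_t)in[2] << 16)) >> 4;
        }
    }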
@@ -270,7 +299,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
case set_rle:
{ U32 const lhlCode = ((istart[0]) >> 2) & 3;
size_t litSize, lhSize;
- size_t expectedWriteSize = MIN(ZSTD_BLOCKSIZE_MAX, dstCapacity);
+ size_t expectedWriteSize = MIN(blockSizeMax, dstCapacity);
switch(lhlCode)
{
case 0: case 2: default: /* note : default is impossible, since lhlCode into [0..3] */
@@ -279,16 +308,17 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
break;
case 1:
lhSize = 2;
+ RETURN_ERROR_IF(srcSize<3, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 2; here we need lhSize+1 = 3");
litSize = MEM_readLE16(istart) >> 4;
break;
case 3:
lhSize = 3;
+ RETURN_ERROR_IF(srcSize<4, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 2; here we need lhSize+1 = 4");
litSize = MEM_readLE24(istart) >> 4;
- RETURN_ERROR_IF(srcSize<4, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4");
break;
}
RETURN_ERROR_IF(litSize > 0 && dst == NULL, dstSize_tooSmall, "NULL not handled");
- RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected, "");
+ RETURN_ERROR_IF(litSize > blockSizeMax, corruption_detected, "");
RETURN_ERROR_IF(expectedWriteSize < litSize, dstSize_tooSmall, "");
ZSTD_allocateLiteralsBuffer(dctx, dst, dstCapacity, litSize, streaming, expectedWriteSize, 1);
if (dctx->litBufferLocation == ZSTD_split)
@@ -310,6 +340,18 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
}
}
+/* Hidden declaration for fullbench */
+size_t ZSTD_decodeLiteralsBlock_wrapper(ZSTD_DCtx* dctx,
+ const void* src, size_t srcSize,
+ void* dst, size_t dstCapacity);
+size_t ZSTD_decodeLiteralsBlock_wrapper(ZSTD_DCtx* dctx,
+ const void* src, size_t srcSize,
+ void* dst, size_t dstCapacity)
+{
+ dctx->isFrameDecompression = 0;
+ return ZSTD_decodeLiteralsBlock(dctx, src, srcSize, dst, dstCapacity, not_streaming);
+}
+
/* Default FSE distribution tables.
* These are pre-calculated FSE decoding tables using default distributions as defined in specification :
* https://github.com/facebook/zstd/blob/release/doc/zstd_compression_format.md#default-distributions
@@ -317,7 +359,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
* - start from default distributions, present in /lib/common/zstd_internal.h
* - generate tables normally, using ZSTD_buildFSETable()
* - printout the content of tables
- * - pretify output, report below, test with fuzzer to ensure it's correct */
+ * - prettify output, report below, test with fuzzer to ensure it's correct */
/* Default FSE distribution table for Literal Lengths */
static const ZSTD_seqSymbol LL_defaultDTable[(1<<LL_DEFAULTNORMLOG)+1] = {
@@ -506,14 +548,15 @@ void ZSTD_buildFSETable_body(ZSTD_seqSymbol* dt,
for (i = 8; i < n; i += 8) {
MEM_write64(spread + pos + i, sv);
}
- pos += n;
+ assert(n>=0);
+ pos += (size_t)n;
}
}
/* Now we spread those positions across the table.
- * The benefit of doing it in two stages is that we avoid the the
+ * The benefit of doing it in two stages is that we avoid the
* variable size inner loop, which caused lots of branch misses.
* Now we can run through all the positions without any branch misses.
- * We unroll the loop twice, since that is what emperically worked best.
+ * We unroll the loop twice, since that is what empirically worked best.
*/
{
size_t position = 0;
@@ -540,7 +583,7 @@ void ZSTD_buildFSETable_body(ZSTD_seqSymbol* dt,
for (i=0; i<n; i++) {
tableDecode[position].baseValue = s;
position = (position + step) & tableMask;
- while (position > highThreshold) position = (position + step) & tableMask; /* lowprob area */
+ while (UNLIKELY(position > highThreshold)) position = (position + step) & tableMask; /* lowprob area */
} }
assert(position == 0); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
}
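The second-stage spread shown above can be reduced to its core idea: hop through the table with a fixed step that visits every cell exactly once, skipping the high cells reserved for "less than one" probability symbols. A minimal sketch (cells above highThreshold are assumed to have been pre-filled):

    #include <stddef.h>

    static void fse_spread(unsigned char* table, unsigned tableLog,
                           const short* normalizedCounter, unsigned maxSymbolValue,
                           size_t highThreshold)
    {
        size_t const tableSize = (size_t)1 << tableLog;
        size_t const tableMask = tableSize - 1;
        size_t const step = (tableSize >> 1) + (tableSize >> 3) + 3; /* coprime with tableSize */
        size_t position = 0;
        unsigned s;
        for (s = 0; s <= maxSymbolValue; s++) {
            int i;
            for (i = 0; i < normalizedCounter[s]; i++) {
                table[position] = (unsigned char)s;
                position = (position + step) & tableMask;
                while (position > highThreshold)
                    position = (position + step) & tableMask; /* low-probability area */
            }
        }
        /* position returns to 0 here iff the normalized counts were consistent */
    }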
@@ -551,7 +594,7 @@ void ZSTD_buildFSETable_body(ZSTD_seqSymbol* dt,
for (u=0; u<tableSize; u++) {
U32 const symbol = tableDecode[u].baseValue;
U32 const nextState = symbolNext[symbol]++;
- tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32(nextState) );
+ tableDecode[u].nbBits = (BYTE) (tableLog - ZSTD_highbit32(nextState) );
tableDecode[u].nextState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
assert(nbAdditionalBits[symbol] < 255);
tableDecode[u].nbAdditionalBits = nbAdditionalBits[symbol];
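ZSTD_highbit32() used above is simply floor(log2(x)); the real helper relies on a CLZ builtin where available. A trivial equivalent for reference:

    /* floor(log2(v)) for v != 0; e.g. highbit32(1) == 0, highbit32(32) == 5 */
    static unsigned highbit32(unsigned v)
    {
        unsigned n = 0;
        while (v >>= 1) n++;
        return n;
    }

With this, nbBits = tableLog - highbit32(nextState) is exactly the number of bits needed so that (nextState << nbBits) lands in [tableSize, 2*tableSize), which is what the nextState field stores after subtracting tableSize.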
@@ -603,7 +646,7 @@ void ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
* @return : nb bytes read from src,
* or an error code if it fails */
static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymbol** DTablePtr,
- symbolEncodingType_e type, unsigned max, U32 maxLog,
+ SymbolEncodingType_e type, unsigned max, U32 maxLog,
const void* src, size_t srcSize,
const U32* baseValue, const U8* nbAdditionalBits,
const ZSTD_seqSymbol* defaultTable, U32 flagRepeatTable,
@@ -664,11 +707,6 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
/* SeqHead */
nbSeq = *ip++;
- if (!nbSeq) {
- *nbSeqPtr=0;
- RETURN_ERROR_IF(srcSize != 1, srcSize_wrong, "");
- return 1;
- }
if (nbSeq > 0x7F) {
if (nbSeq == 0xFF) {
RETURN_ERROR_IF(ip+2 > iend, srcSize_wrong, "");
@@ -681,11 +719,19 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
}
*nbSeqPtr = nbSeq;
+ if (nbSeq == 0) {
+ /* No sequence : section ends immediately */
+ RETURN_ERROR_IF(ip != iend, corruption_detected,
+ "extraneous data present in the Sequences section");
+ return (size_t)(ip - istart);
+ }
+
/* FSE table descriptors */
RETURN_ERROR_IF(ip+1 > iend, srcSize_wrong, ""); /* minimum possible size: 1 byte for symbol encoding types */
- { symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6);
- symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3);
- symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3);
+ RETURN_ERROR_IF(*ip & 3, corruption_detected, ""); /* The last field, Reserved, must be all-zeroes. */
+ { SymbolEncodingType_e const LLtype = (SymbolEncodingType_e)(*ip >> 6);
+ SymbolEncodingType_e const OFtype = (SymbolEncodingType_e)((*ip >> 4) & 3);
+ SymbolEncodingType_e const MLtype = (SymbolEncodingType_e)((*ip >> 2) & 3);
ip++;
/* Build DTables */
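The relocated nbSeq == 0 handling and the new Reserved-bits check above both concern the Sequences_Section_Header. A standalone sketch of that header layout (per RFC 8878, matching the shifts used in this hunk):

    #include <stdint.h>
    #include <stddef.h>

    /* Number_of_Sequences is a 1-3 byte varint:
     *   byte0 <  128 : nbSeq = byte0
     *   byte0 <  255 : nbSeq = ((byte0 - 128) << 8) + byte1
     *   byte0 == 255 : nbSeq = byte1 + (byte2 << 8) + 0x7F00
     * When nbSeq > 0 it is followed by one compression-modes byte:
     *   bits 6-7 = Literals_Lengths_Mode, 4-5 = Offsets_Mode,
     *   bits 2-3 = Match_Lengths_Mode,    0-1 = Reserved (must be 0). */
    static size_t read_nb_sequences(const uint8_t* ip, size_t srcSize, unsigned* nbSeq)
    {
        if (srcSize < 1) return 0;                 /* 0 => not enough input */
        if (ip[0] < 128) { *nbSeq = ip[0]; return 1; }
        if (ip[0] < 255) {
            if (srcSize < 2) return 0;
            *nbSeq = ((unsigned)(ip[0] - 128) << 8) + ip[1];
            return 2;
        }
        if (srcSize < 3) return 0;
        *nbSeq = (unsigned)ip[1] + ((unsigned)ip[2] << 8) + 0x7F00;
        return 3;
    }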
@@ -829,7 +875,7 @@ static void ZSTD_safecopy(BYTE* op, const BYTE* const oend_w, BYTE const* ip, pt
/* ZSTD_safecopyDstBeforeSrc():
* This version allows overlap with dst before src, or handles the non-overlap case with dst after src
* Kept separate from more common ZSTD_safecopy case to avoid performance impact to the safecopy common case */
-static void ZSTD_safecopyDstBeforeSrc(BYTE* op, BYTE const* ip, ptrdiff_t length) {
+static void ZSTD_safecopyDstBeforeSrc(BYTE* op, const BYTE* ip, ptrdiff_t length) {
ptrdiff_t const diff = op - ip;
BYTE* const oend = op + length;
@@ -858,6 +904,7 @@ static void ZSTD_safecopyDstBeforeSrc(BYTE* op, BYTE const* ip, ptrdiff_t length
* to be optimized for many small sequences, since those fall into ZSTD_execSequence().
*/
FORCE_NOINLINE
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_execSequenceEnd(BYTE* op,
BYTE* const oend, seq_t sequence,
const BYTE** litPtr, const BYTE* const litLimit,
@@ -905,6 +952,7 @@ size_t ZSTD_execSequenceEnd(BYTE* op,
* This version is intended to be used during instances where the litBuffer is still split. It is kept separate to avoid performance impact for the good case.
*/
FORCE_NOINLINE
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_execSequenceEndSplitLitBuffer(BYTE* op,
BYTE* const oend, const BYTE* const oend_w, seq_t sequence,
const BYTE** litPtr, const BYTE* const litLimit,
@@ -950,6 +998,7 @@ size_t ZSTD_execSequenceEndSplitLitBuffer(BYTE* op,
}
HINT_INLINE
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_execSequence(BYTE* op,
BYTE* const oend, seq_t sequence,
const BYTE** litPtr, const BYTE* const litLimit,
@@ -964,6 +1013,11 @@ size_t ZSTD_execSequence(BYTE* op,
assert(op != NULL /* Precondition */);
assert(oend_w < oend /* No underflow */);
+
+#if defined(__aarch64__)
+ /* prefetch sequence starting from match that will be used for copy later */
+ PREFETCH_L1(match);
+#endif
/* Handle edge cases in a slow path:
* - Read beyond end of literals
* - Match end is within WILDCOPY_OVERLIMIT of oend
@@ -1043,6 +1097,7 @@ size_t ZSTD_execSequence(BYTE* op,
}
HINT_INLINE
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_execSequenceSplitLitBuffer(BYTE* op,
BYTE* const oend, const BYTE* const oend_w, seq_t sequence,
const BYTE** litPtr, const BYTE* const litLimit,
@@ -1154,7 +1209,7 @@ ZSTD_updateFseStateWithDInfo(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, U16
}
/* We need to add at most (ZSTD_WINDOWLOG_MAX_32 - 1) bits to read the maximum
- * offset bits. But we can only read at most (STREAM_ACCUMULATOR_MIN_32 - 1)
+ * offset bits. But we can only read at most STREAM_ACCUMULATOR_MIN_32
* bits before reloading. This value is the maximum number of bytes we read
* after reloading when we are decoding long offsets.
*/
@@ -1165,13 +1220,37 @@ ZSTD_updateFseStateWithDInfo(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, U16
typedef enum { ZSTD_lo_isRegularOffset, ZSTD_lo_isLongOffset=1 } ZSTD_longOffset_e;
+/*
+ * ZSTD_decodeSequence():
+ * @p longOffsets : tells the decoder to reload more bit while decoding large offsets
+ * only used in 32-bit mode
+ * @return : Sequence (litL + matchL + offset)
+ */
FORCE_INLINE_TEMPLATE seq_t
-ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets)
+ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets, const int isLastSeq)
{
seq_t seq;
+ /*
+ * ZSTD_seqSymbol is a 64 bits wide structure.
+ * It can be loaded in one operation
+ * and its fields extracted by simply shifting or bit-extracting on aarch64.
+ * GCC doesn't recognize this and generates more unnecessary ldr/ldrb/ldrh
+ * operations that cause performance drop. This can be avoided by using this
+ * ZSTD_memcpy hack.
+ */
+#if defined(__aarch64__) && (defined(__GNUC__) && !defined(__clang__))
+ ZSTD_seqSymbol llDInfoS, mlDInfoS, ofDInfoS;
+ ZSTD_seqSymbol* const llDInfo = &llDInfoS;
+ ZSTD_seqSymbol* const mlDInfo = &mlDInfoS;
+ ZSTD_seqSymbol* const ofDInfo = &ofDInfoS;
+ ZSTD_memcpy(llDInfo, seqState->stateLL.table + seqState->stateLL.state, sizeof(ZSTD_seqSymbol));
+ ZSTD_memcpy(mlDInfo, seqState->stateML.table + seqState->stateML.state, sizeof(ZSTD_seqSymbol));
+ ZSTD_memcpy(ofDInfo, seqState->stateOffb.table + seqState->stateOffb.state, sizeof(ZSTD_seqSymbol));
+#else
const ZSTD_seqSymbol* const llDInfo = seqState->stateLL.table + seqState->stateLL.state;
const ZSTD_seqSymbol* const mlDInfo = seqState->stateML.table + seqState->stateML.state;
const ZSTD_seqSymbol* const ofDInfo = seqState->stateOffb.table + seqState->stateOffb.state;
+#endif
seq.matchLength = mlDInfo->baseValue;
seq.litLength = llDInfo->baseValue;
{ U32 const ofBase = ofDInfo->baseValue;
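About the aarch64/GCC workaround introduced above: copying the whole 8-byte table entry into a local through ZSTD_memcpy lets the compiler issue one 64-bit load and keep the fields in registers. A generic illustration of the trick (the field layout below is illustrative only, not a claim about ZSTD_seqSymbol's exact ordering):

    #include <string.h>
    #include <stdint.h>
    #include <stddef.h>

    typedef struct {            /* 8 bytes total, like the decode-table entries */
        uint16_t nextState;
        uint8_t  nbAdditionalBits;
        uint8_t  nbBits;
        uint32_t baseValue;
    } Entry;

    /* Load the entry once, by value; the memcpy typically compiles to a single
     * 64-bit load, and later field accesses become shifts/extracts instead of
     * separate narrow loads. */
    static inline Entry load_entry(const Entry* table, size_t state)
    {
        Entry e;
        memcpy(&e, table + state, sizeof e);
        return e;
    }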
@@ -1186,28 +1265,31 @@ ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets)
U32 const llnbBits = llDInfo->nbBits;
U32 const mlnbBits = mlDInfo->nbBits;
U32 const ofnbBits = ofDInfo->nbBits;
+
+ assert(llBits <= MaxLLBits);
+ assert(mlBits <= MaxMLBits);
+ assert(ofBits <= MaxOff);
/*
* As gcc has better branch and block analyzers, sometimes it is only
- * valuable to mark likelyness for clang, it gives around 3-4% of
+ * valuable to mark likeliness for clang, it gives around 3-4% of
* performance.
*/
/* sequence */
{ size_t offset;
- #if defined(__clang__)
- if (LIKELY(ofBits > 1)) {
- #else
if (ofBits > 1) {
- #endif
ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1);
ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5);
- assert(ofBits <= MaxOff);
+ ZSTD_STATIC_ASSERT(STREAM_ACCUMULATOR_MIN_32 > LONG_OFFSETS_MAX_EXTRA_BITS_32);
+ ZSTD_STATIC_ASSERT(STREAM_ACCUMULATOR_MIN_32 - LONG_OFFSETS_MAX_EXTRA_BITS_32 >= MaxMLBits);
if (MEM_32bits() && longOffsets && (ofBits >= STREAM_ACCUMULATOR_MIN_32)) {
- U32 const extraBits = ofBits - MIN(ofBits, 32 - seqState->DStream.bitsConsumed);
+ /* Always read extra bits, this keeps the logic simple,
+ * avoids branches, and avoids accidentally reading 0 bits.
+ */
+ U32 const extraBits = LONG_OFFSETS_MAX_EXTRA_BITS_32;
offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits);
BIT_reloadDStream(&seqState->DStream);
- if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits);
- assert(extraBits <= LONG_OFFSETS_MAX_EXTRA_BITS_32); /* to avoid another reload */
+ offset += BIT_readBitsFast(&seqState->DStream, extraBits);
} else {
offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits/*>0*/); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */
if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);
@@ -1224,7 +1306,7 @@ ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets)
} else {
offset = ofBase + ll0 + BIT_readBitsFast(&seqState->DStream, 1);
{ size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
- temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */
+ temp -= !temp; /* 0 is not valid: input corrupted => force offset to -1 => corruption detected at execSequence */
if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1];
seqState->prevOffset[1] = seqState->prevOffset[0];
seqState->prevOffset[0] = offset = temp;
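The `temp -= !temp` change above only alters how a corrupt repeat-offset of 0 surfaces; the surrounding logic is the standard repeat-offset resolution. A standalone sketch of that rule for offset values 1..3 (per RFC 8878; the zero-offset handling below keeps the older "force to 1" behaviour for simplicity):

    #include <stddef.h>

    /* offset_value 1..3 refers to the 3-slot repeat-offset history.
     * ll0 is 1 when the sequence's literal length is 0, which shifts the
     * meaning of the codes by one (and code 3 then means rep[0] - 1). */
    static size_t repcode_to_offset(size_t rep[3], unsigned offset_value, int ll0)
    {
        unsigned const idx = offset_value - 1 + (unsigned)ll0;   /* 0..3 */
        size_t offset;
        if (idx == 0)
            return rep[0];                  /* most common case: history unchanged */
        offset = (idx == 3) ? rep[0] - 1 : rep[idx];
        if (offset == 0) offset = 1;        /* corrupt input; the patched code reports it later instead */
        if (idx != 1) rep[2] = rep[1];      /* rotate history so the result becomes rep[0] */
        rep[1] = rep[0];
        rep[0] = offset;
        return offset;
    }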
@@ -1232,11 +1314,7 @@ ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets)
seq.offset = offset;
}
- #if defined(__clang__)
- if (UNLIKELY(mlBits > 0))
- #else
if (mlBits > 0)
- #endif
seq.matchLength += BIT_readBitsFast(&seqState->DStream, mlBits/*>0*/);
if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32))
@@ -1246,11 +1324,7 @@ ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets)
/* Ensure there are enough bits to read the rest of data in 64-bit mode. */
ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64);
- #if defined(__clang__)
- if (UNLIKELY(llBits > 0))
- #else
if (llBits > 0)
- #endif
seq.litLength += BIT_readBitsFast(&seqState->DStream, llBits/*>0*/);
if (MEM_32bits())
@@ -1259,17 +1333,22 @@ ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets)
DEBUGLOG(6, "seq: litL=%u, matchL=%u, offset=%u",
(U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset);
- ZSTD_updateFseStateWithDInfo(&seqState->stateLL, &seqState->DStream, llNext, llnbBits); /* <= 9 bits */
- ZSTD_updateFseStateWithDInfo(&seqState->stateML, &seqState->DStream, mlNext, mlnbBits); /* <= 9 bits */
- if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */
- ZSTD_updateFseStateWithDInfo(&seqState->stateOffb, &seqState->DStream, ofNext, ofnbBits); /* <= 8 bits */
+ if (!isLastSeq) {
+ /* don't update FSE state for last Sequence */
+ ZSTD_updateFseStateWithDInfo(&seqState->stateLL, &seqState->DStream, llNext, llnbBits); /* <= 9 bits */
+ ZSTD_updateFseStateWithDInfo(&seqState->stateML, &seqState->DStream, mlNext, mlnbBits); /* <= 9 bits */
+ if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */
+ ZSTD_updateFseStateWithDInfo(&seqState->stateOffb, &seqState->DStream, ofNext, ofnbBits); /* <= 8 bits */
+ BIT_reloadDStream(&seqState->DStream);
+ }
}
return seq;
}
-#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
-MEM_STATIC int ZSTD_dictionaryIsActive(ZSTD_DCtx const* dctx, BYTE const* prefixStart, BYTE const* oLitEnd)
+#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
+#if DEBUGLEVEL >= 1
+static int ZSTD_dictionaryIsActive(ZSTD_DCtx const* dctx, BYTE const* prefixStart, BYTE const* oLitEnd)
{
size_t const windowSize = dctx->fParams.windowSize;
/* No dictionary used. */
@@ -1283,30 +1362,33 @@ MEM_STATIC int ZSTD_dictionaryIsActive(ZSTD_DCtx const* dctx, BYTE const* prefix
/* Dictionary is active. */
return 1;
}
+#endif
-MEM_STATIC void ZSTD_assertValidSequence(
+static void ZSTD_assertValidSequence(
ZSTD_DCtx const* dctx,
BYTE const* op, BYTE const* oend,
seq_t const seq,
BYTE const* prefixStart, BYTE const* virtualStart)
{
#if DEBUGLEVEL >= 1
- size_t const windowSize = dctx->fParams.windowSize;
- size_t const sequenceSize = seq.litLength + seq.matchLength;
- BYTE const* const oLitEnd = op + seq.litLength;
- DEBUGLOG(6, "Checking sequence: litL=%u matchL=%u offset=%u",
- (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset);
- assert(op <= oend);
- assert((size_t)(oend - op) >= sequenceSize);
- assert(sequenceSize <= ZSTD_BLOCKSIZE_MAX);
- if (ZSTD_dictionaryIsActive(dctx, prefixStart, oLitEnd)) {
- size_t const dictSize = (size_t)((char const*)dctx->dictContentEndForFuzzing - (char const*)dctx->dictContentBeginForFuzzing);
- /* Offset must be within the dictionary. */
- assert(seq.offset <= (size_t)(oLitEnd - virtualStart));
- assert(seq.offset <= windowSize + dictSize);
- } else {
- /* Offset must be within our window. */
- assert(seq.offset <= windowSize);
+ if (dctx->isFrameDecompression) {
+ size_t const windowSize = dctx->fParams.windowSize;
+ size_t const sequenceSize = seq.litLength + seq.matchLength;
+ BYTE const* const oLitEnd = op + seq.litLength;
+ DEBUGLOG(6, "Checking sequence: litL=%u matchL=%u offset=%u",
+ (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset);
+ assert(op <= oend);
+ assert((size_t)(oend - op) >= sequenceSize);
+ assert(sequenceSize <= ZSTD_blockSizeMax(dctx));
+ if (ZSTD_dictionaryIsActive(dctx, prefixStart, oLitEnd)) {
+ size_t const dictSize = (size_t)((char const*)dctx->dictContentEndForFuzzing - (char const*)dctx->dictContentBeginForFuzzing);
+ /* Offset must be within the dictionary. */
+ assert(seq.offset <= (size_t)(oLitEnd - virtualStart));
+ assert(seq.offset <= windowSize + dictSize);
+ } else {
+ /* Offset must be within our window. */
+ assert(seq.offset <= windowSize);
+ }
}
#else
(void)dctx, (void)op, (void)oend, (void)seq, (void)prefixStart, (void)virtualStart;
@@ -1322,23 +1404,21 @@ DONT_VECTORIZE
ZSTD_decompressSequences_bodySplitLitBuffer( ZSTD_DCtx* dctx,
void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize, int nbSeq,
- const ZSTD_longOffset_e isLongOffset,
- const int frame)
+ const ZSTD_longOffset_e isLongOffset)
{
const BYTE* ip = (const BYTE*)seqStart;
const BYTE* const iend = ip + seqSize;
BYTE* const ostart = (BYTE*)dst;
- BYTE* const oend = ostart + maxDstSize;
+ BYTE* const oend = ZSTD_maybeNullPtrAdd(ostart, maxDstSize);
BYTE* op = ostart;
const BYTE* litPtr = dctx->litPtr;
const BYTE* litBufferEnd = dctx->litBufferEnd;
const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart);
const BYTE* const vBase = (const BYTE*) (dctx->virtualStart);
const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
- DEBUGLOG(5, "ZSTD_decompressSequences_bodySplitLitBuffer");
- (void)frame;
+ DEBUGLOG(5, "ZSTD_decompressSequences_bodySplitLitBuffer (%i seqs)", nbSeq);
- /* Regen sequences */
+ /* Literals are split between internal buffer & output buffer */
if (nbSeq) {
seqState_t seqState;
dctx->fseEntropy = 1;
@@ -1357,8 +1437,7 @@ ZSTD_decompressSequences_bodySplitLitBuffer( ZSTD_DCtx* dctx,
BIT_DStream_completed < BIT_DStream_overflow);
/* decompress without overrunning litPtr begins */
- {
- seq_t sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
+ { seq_t sequence = {0,0,0}; /* some static analyzer believe that @sequence is not initialized (it necessarily is, since for(;;) loop as at least one iteration) */
/* Align the decompression loop to 32 + 16 bytes.
*
* zstd compiled with gcc-9 on an Intel i9-9900k shows 10% decompression
@@ -1420,27 +1499,26 @@ ZSTD_decompressSequences_bodySplitLitBuffer( ZSTD_DCtx* dctx,
#endif
/* Handle the initial state where litBuffer is currently split between dst and litExtraBuffer */
- for (; litPtr + sequence.litLength <= dctx->litBufferEnd; ) {
- size_t const oneSeqSize = ZSTD_execSequenceSplitLitBuffer(op, oend, litPtr + sequence.litLength - WILDCOPY_OVERLENGTH, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd);
+ for ( ; nbSeq; nbSeq--) {
+ sequence = ZSTD_decodeSequence(&seqState, isLongOffset, nbSeq==1);
+ if (litPtr + sequence.litLength > dctx->litBufferEnd) break;
+ { size_t const oneSeqSize = ZSTD_execSequenceSplitLitBuffer(op, oend, litPtr + sequence.litLength - WILDCOPY_OVERLENGTH, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd);
#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
- assert(!ZSTD_isError(oneSeqSize));
- if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase);
+ assert(!ZSTD_isError(oneSeqSize));
+ ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase);
#endif
- if (UNLIKELY(ZSTD_isError(oneSeqSize)))
- return oneSeqSize;
- DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
- op += oneSeqSize;
- if (UNLIKELY(!--nbSeq))
- break;
- BIT_reloadDStream(&(seqState.DStream));
- sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
- }
+ if (UNLIKELY(ZSTD_isError(oneSeqSize)))
+ return oneSeqSize;
+ DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
+ op += oneSeqSize;
+ } }
+ DEBUGLOG(6, "reached: (litPtr + sequence.litLength > dctx->litBufferEnd)");
/* If there are more sequences, they will need to read literals from litExtraBuffer; copy over the remainder from dst and update litPtr and litEnd */
if (nbSeq > 0) {
const size_t leftoverLit = dctx->litBufferEnd - litPtr;
- if (leftoverLit)
- {
+ DEBUGLOG(6, "There are %i sequences left, and %zu/%zu literals left in buffer", nbSeq, leftoverLit, sequence.litLength);
+ if (leftoverLit) {
RETURN_ERROR_IF(leftoverLit > (size_t)(oend - op), dstSize_tooSmall, "remaining lit must fit within dstBuffer");
ZSTD_safecopyDstBeforeSrc(op, litPtr, leftoverLit);
sequence.litLength -= leftoverLit;
@@ -1449,24 +1527,22 @@ ZSTD_decompressSequences_bodySplitLitBuffer( ZSTD_DCtx* dctx,
litPtr = dctx->litExtraBuffer;
litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE;
dctx->litBufferLocation = ZSTD_not_in_dst;
- {
- size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd);
+ { size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd);
#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
assert(!ZSTD_isError(oneSeqSize));
- if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase);
+ ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase);
#endif
if (UNLIKELY(ZSTD_isError(oneSeqSize)))
return oneSeqSize;
DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
op += oneSeqSize;
- if (--nbSeq)
- BIT_reloadDStream(&(seqState.DStream));
}
+ nbSeq--;
}
}
- if (nbSeq > 0) /* there is remaining lit from extra buffer */
- {
+ if (nbSeq > 0) {
+ /* there is remaining lit from extra buffer */
#if defined(__x86_64__)
__asm__(".p2align 6");
@@ -1485,35 +1561,34 @@ ZSTD_decompressSequences_bodySplitLitBuffer( ZSTD_DCtx* dctx,
# endif
#endif
- for (; ; ) {
- seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
+ for ( ; nbSeq ; nbSeq--) {
+ seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset, nbSeq==1);
size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd);
#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
assert(!ZSTD_isError(oneSeqSize));
- if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase);
+ ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase);
#endif
if (UNLIKELY(ZSTD_isError(oneSeqSize)))
return oneSeqSize;
DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
op += oneSeqSize;
- if (UNLIKELY(!--nbSeq))
- break;
- BIT_reloadDStream(&(seqState.DStream));
}
}
/* check if reached exact end */
DEBUGLOG(5, "ZSTD_decompressSequences_bodySplitLitBuffer: after decode loop, remaining nbSeq : %i", nbSeq);
RETURN_ERROR_IF(nbSeq, corruption_detected, "");
- RETURN_ERROR_IF(BIT_reloadDStream(&seqState.DStream) < BIT_DStream_completed, corruption_detected, "");
+ DEBUGLOG(5, "bitStream : start=%p, ptr=%p, bitsConsumed=%u", seqState.DStream.start, seqState.DStream.ptr, seqState.DStream.bitsConsumed);
+ RETURN_ERROR_IF(!BIT_endOfDStream(&seqState.DStream), corruption_detected, "");
/* save reps for next block */
{ U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
}
/* last literal segment */
- if (dctx->litBufferLocation == ZSTD_split) /* split hasn't been reached yet, first get dst then copy litExtraBuffer */
- {
- size_t const lastLLSize = litBufferEnd - litPtr;
+ if (dctx->litBufferLocation == ZSTD_split) {
+ /* split hasn't been reached yet, first get dst then copy litExtraBuffer */
+ size_t const lastLLSize = (size_t)(litBufferEnd - litPtr);
+ DEBUGLOG(6, "copy last literals from segment : %u", (U32)lastLLSize);
RETURN_ERROR_IF(lastLLSize > (size_t)(oend - op), dstSize_tooSmall, "");
if (op != NULL) {
ZSTD_memmove(op, litPtr, lastLLSize);
@@ -1523,15 +1598,17 @@ ZSTD_decompressSequences_bodySplitLitBuffer( ZSTD_DCtx* dctx,
litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE;
dctx->litBufferLocation = ZSTD_not_in_dst;
}
- { size_t const lastLLSize = litBufferEnd - litPtr;
+ /* copy last literals from internal buffer */
+ { size_t const lastLLSize = (size_t)(litBufferEnd - litPtr);
+ DEBUGLOG(6, "copy last literals from internal buffer : %u", (U32)lastLLSize);
RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, "");
if (op != NULL) {
ZSTD_memcpy(op, litPtr, lastLLSize);
op += lastLLSize;
- }
- }
+ } }
- return op-ostart;
+ DEBUGLOG(6, "decoded block of size %u bytes", (U32)(op - ostart));
+ return (size_t)(op - ostart);
}
FORCE_INLINE_TEMPLATE size_t
@@ -1539,21 +1616,19 @@ DONT_VECTORIZE
ZSTD_decompressSequences_body(ZSTD_DCtx* dctx,
void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize, int nbSeq,
- const ZSTD_longOffset_e isLongOffset,
- const int frame)
+ const ZSTD_longOffset_e isLongOffset)
{
const BYTE* ip = (const BYTE*)seqStart;
const BYTE* const iend = ip + seqSize;
BYTE* const ostart = (BYTE*)dst;
- BYTE* const oend = dctx->litBufferLocation == ZSTD_not_in_dst ? ostart + maxDstSize : dctx->litBuffer;
+ BYTE* const oend = dctx->litBufferLocation == ZSTD_not_in_dst ? ZSTD_maybeNullPtrAdd(ostart, maxDstSize) : dctx->litBuffer;
BYTE* op = ostart;
const BYTE* litPtr = dctx->litPtr;
const BYTE* const litEnd = litPtr + dctx->litSize;
const BYTE* const prefixStart = (const BYTE*)(dctx->prefixStart);
const BYTE* const vBase = (const BYTE*)(dctx->virtualStart);
const BYTE* const dictEnd = (const BYTE*)(dctx->dictEnd);
- DEBUGLOG(5, "ZSTD_decompressSequences_body");
- (void)frame;
+ DEBUGLOG(5, "ZSTD_decompressSequences_body: nbSeq = %d", nbSeq);
/* Regen sequences */
if (nbSeq) {
@@ -1568,11 +1643,6 @@ ZSTD_decompressSequences_body(ZSTD_DCtx* dctx,
ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
assert(dst != NULL);
- ZSTD_STATIC_ASSERT(
- BIT_DStream_unfinished < BIT_DStream_completed &&
- BIT_DStream_endOfBuffer < BIT_DStream_completed &&
- BIT_DStream_completed < BIT_DStream_overflow);
-
#if defined(__x86_64__)
__asm__(".p2align 6");
__asm__("nop");
@@ -1587,73 +1657,70 @@ ZSTD_decompressSequences_body(ZSTD_DCtx* dctx,
# endif
#endif
- for ( ; ; ) {
- seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
+ for ( ; nbSeq ; nbSeq--) {
+ seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset, nbSeq==1);
size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, prefixStart, vBase, dictEnd);
#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
assert(!ZSTD_isError(oneSeqSize));
- if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase);
+ ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase);
#endif
if (UNLIKELY(ZSTD_isError(oneSeqSize)))
return oneSeqSize;
DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
op += oneSeqSize;
- if (UNLIKELY(!--nbSeq))
- break;
- BIT_reloadDStream(&(seqState.DStream));
}
/* check if reached exact end */
- DEBUGLOG(5, "ZSTD_decompressSequences_body: after decode loop, remaining nbSeq : %i", nbSeq);
- RETURN_ERROR_IF(nbSeq, corruption_detected, "");
- RETURN_ERROR_IF(BIT_reloadDStream(&seqState.DStream) < BIT_DStream_completed, corruption_detected, "");
+ assert(nbSeq == 0);
+ RETURN_ERROR_IF(!BIT_endOfDStream(&seqState.DStream), corruption_detected, "");
/* save reps for next block */
{ U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
}
/* last literal segment */
- { size_t const lastLLSize = litEnd - litPtr;
+ { size_t const lastLLSize = (size_t)(litEnd - litPtr);
+ DEBUGLOG(6, "copy last literals : %u", (U32)lastLLSize);
RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, "");
if (op != NULL) {
ZSTD_memcpy(op, litPtr, lastLLSize);
op += lastLLSize;
- }
- }
+ } }
- return op-ostart;
+ DEBUGLOG(6, "decoded block of size %u bytes", (U32)(op - ostart));
+ return (size_t)(op - ostart);
}
static size_t
ZSTD_decompressSequences_default(ZSTD_DCtx* dctx,
void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize, int nbSeq,
- const ZSTD_longOffset_e isLongOffset,
- const int frame)
+ const ZSTD_longOffset_e isLongOffset)
{
- return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+ return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
}
static size_t
ZSTD_decompressSequencesSplitLitBuffer_default(ZSTD_DCtx* dctx,
void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize, int nbSeq,
- const ZSTD_longOffset_e isLongOffset,
- const int frame)
+ const ZSTD_longOffset_e isLongOffset)
{
- return ZSTD_decompressSequences_bodySplitLitBuffer(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+ return ZSTD_decompressSequences_bodySplitLitBuffer(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
}
#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
-FORCE_INLINE_TEMPLATE size_t
-ZSTD_prefetchMatch(size_t prefetchPos, seq_t const sequence,
+FORCE_INLINE_TEMPLATE
+
+size_t ZSTD_prefetchMatch(size_t prefetchPos, seq_t const sequence,
const BYTE* const prefixStart, const BYTE* const dictEnd)
{
prefetchPos += sequence.litLength;
{ const BYTE* const matchBase = (sequence.offset > prefetchPos) ? dictEnd : prefixStart;
- const BYTE* const match = matchBase + prefetchPos - sequence.offset; /* note : this operation can overflow when seq.offset is really too large, which can only happen when input is corrupted.
- * No consequence though : memory address is only used for prefetching, not for dereferencing */
+ /* note : this operation can overflow when seq.offset is really too large, which can only happen when input is corrupted.
+ * No consequence though : memory address is only used for prefetching, not for dereferencing */
+ const BYTE* const match = ZSTD_wrappedPtrSub(ZSTD_wrappedPtrAdd(matchBase, prefetchPos), sequence.offset);
PREFETCH_L1(match); PREFETCH_L1(match+CACHELINE_SIZE); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
}
return prefetchPos + sequence.matchLength;
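ZSTD_wrappedPtrAdd()/ZSTD_wrappedPtrSub() used above exist so that a corrupted offset cannot turn this prefetch-address computation into undefined pointer arithmetic. One way to get a similar effect in portable C (a sketch, not the patch's implementation) is to route the arithmetic through uintptr_t:

    #include <stdint.h>
    #include <stddef.h>

    /* The results may point anywhere; they must only ever be fed to a
     * prefetch hint, never dereferenced. */
    static const unsigned char* wrapped_ptr_add(const unsigned char* p, size_t add)
    {
        return (const unsigned char*)((uintptr_t)p + (uintptr_t)add);
    }

    static const unsigned char* wrapped_ptr_sub(const unsigned char* p, size_t sub)
    {
        return (const unsigned char*)((uintptr_t)p - (uintptr_t)sub);
    }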
@@ -1668,20 +1735,18 @@ ZSTD_decompressSequencesLong_body(
ZSTD_DCtx* dctx,
void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize, int nbSeq,
- const ZSTD_longOffset_e isLongOffset,
- const int frame)
+ const ZSTD_longOffset_e isLongOffset)
{
const BYTE* ip = (const BYTE*)seqStart;
const BYTE* const iend = ip + seqSize;
BYTE* const ostart = (BYTE*)dst;
- BYTE* const oend = dctx->litBufferLocation == ZSTD_in_dst ? dctx->litBuffer : ostart + maxDstSize;
+ BYTE* const oend = dctx->litBufferLocation == ZSTD_in_dst ? dctx->litBuffer : ZSTD_maybeNullPtrAdd(ostart, maxDstSize);
BYTE* op = ostart;
const BYTE* litPtr = dctx->litPtr;
const BYTE* litBufferEnd = dctx->litBufferEnd;
const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart);
const BYTE* const dictStart = (const BYTE*) (dctx->virtualStart);
const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
- (void)frame;
/* Regen sequences */
if (nbSeq) {
@@ -1706,20 +1771,17 @@ ZSTD_decompressSequencesLong_body(
ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
/* prepare in advance */
- for (seqNb=0; (BIT_reloadDStream(&seqState.DStream) <= BIT_DStream_completed) && (seqNb<seqAdvance); seqNb++) {
- seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
+ for (seqNb=0; seqNb<seqAdvance; seqNb++) {
+ seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset, seqNb == nbSeq-1);
prefetchPos = ZSTD_prefetchMatch(prefetchPos, sequence, prefixStart, dictEnd);
sequences[seqNb] = sequence;
}
- RETURN_ERROR_IF(seqNb<seqAdvance, corruption_detected, "");
/* decompress without stomping litBuffer */
- for (; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (seqNb < nbSeq); seqNb++) {
- seq_t sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
- size_t oneSeqSize;
+ for (; seqNb < nbSeq; seqNb++) {
+ seq_t sequence = ZSTD_decodeSequence(&seqState, isLongOffset, seqNb == nbSeq-1);
- if (dctx->litBufferLocation == ZSTD_split && litPtr + sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK].litLength > dctx->litBufferEnd)
- {
+ if (dctx->litBufferLocation == ZSTD_split && litPtr + sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK].litLength > dctx->litBufferEnd) {
/* lit buffer is reaching split point, empty out the first buffer and transition to litExtraBuffer */
const size_t leftoverLit = dctx->litBufferEnd - litPtr;
if (leftoverLit)
@@ -1732,26 +1794,26 @@ ZSTD_decompressSequencesLong_body(
litPtr = dctx->litExtraBuffer;
litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE;
dctx->litBufferLocation = ZSTD_not_in_dst;
- oneSeqSize = ZSTD_execSequence(op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd);
+ { size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd);
#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
- assert(!ZSTD_isError(oneSeqSize));
- if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], prefixStart, dictStart);
+ assert(!ZSTD_isError(oneSeqSize));
+ ZSTD_assertValidSequence(dctx, op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], prefixStart, dictStart);
#endif
- if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
+ if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
- prefetchPos = ZSTD_prefetchMatch(prefetchPos, sequence, prefixStart, dictEnd);
- sequences[seqNb & STORED_SEQS_MASK] = sequence;
- op += oneSeqSize;
- }
+ prefetchPos = ZSTD_prefetchMatch(prefetchPos, sequence, prefixStart, dictEnd);
+ sequences[seqNb & STORED_SEQS_MASK] = sequence;
+ op += oneSeqSize;
+ } }
else
{
/* lit buffer is either wholly contained in first or second split, or not split at all*/
- oneSeqSize = dctx->litBufferLocation == ZSTD_split ?
+ size_t const oneSeqSize = dctx->litBufferLocation == ZSTD_split ?
ZSTD_execSequenceSplitLitBuffer(op, oend, litPtr + sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK].litLength - WILDCOPY_OVERLENGTH, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd) :
ZSTD_execSequence(op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd);
#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
assert(!ZSTD_isError(oneSeqSize));
- if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], prefixStart, dictStart);
+ ZSTD_assertValidSequence(dctx, op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], prefixStart, dictStart);
#endif
if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
@@ -1760,17 +1822,15 @@ ZSTD_decompressSequencesLong_body(
op += oneSeqSize;
}
}
- RETURN_ERROR_IF(seqNb<nbSeq, corruption_detected, "");
+ RETURN_ERROR_IF(!BIT_endOfDStream(&seqState.DStream), corruption_detected, "");
/* finish queue */
seqNb -= seqAdvance;
for ( ; seqNb<nbSeq ; seqNb++) {
seq_t *sequence = &(sequences[seqNb&STORED_SEQS_MASK]);
- if (dctx->litBufferLocation == ZSTD_split && litPtr + sequence->litLength > dctx->litBufferEnd)
- {
+ if (dctx->litBufferLocation == ZSTD_split && litPtr + sequence->litLength > dctx->litBufferEnd) {
const size_t leftoverLit = dctx->litBufferEnd - litPtr;
- if (leftoverLit)
- {
+ if (leftoverLit) {
RETURN_ERROR_IF(leftoverLit > (size_t)(oend - op), dstSize_tooSmall, "remaining lit must fit within dstBuffer");
ZSTD_safecopyDstBeforeSrc(op, litPtr, leftoverLit);
sequence->litLength -= leftoverLit;
@@ -1779,11 +1839,10 @@ ZSTD_decompressSequencesLong_body(
litPtr = dctx->litExtraBuffer;
litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE;
dctx->litBufferLocation = ZSTD_not_in_dst;
- {
- size_t const oneSeqSize = ZSTD_execSequence(op, oend, *sequence, &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd);
+ { size_t const oneSeqSize = ZSTD_execSequence(op, oend, *sequence, &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd);
#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
assert(!ZSTD_isError(oneSeqSize));
- if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[seqNb&STORED_SEQS_MASK], prefixStart, dictStart);
+ ZSTD_assertValidSequence(dctx, op, oend, sequences[seqNb&STORED_SEQS_MASK], prefixStart, dictStart);
#endif
if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
op += oneSeqSize;
@@ -1796,7 +1855,7 @@ ZSTD_decompressSequencesLong_body(
ZSTD_execSequence(op, oend, *sequence, &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd);
#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
assert(!ZSTD_isError(oneSeqSize));
- if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[seqNb&STORED_SEQS_MASK], prefixStart, dictStart);
+ ZSTD_assertValidSequence(dctx, op, oend, sequences[seqNb&STORED_SEQS_MASK], prefixStart, dictStart);
#endif
if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
op += oneSeqSize;
@@ -1808,8 +1867,7 @@ ZSTD_decompressSequencesLong_body(
}
/* last literal segment */
- if (dctx->litBufferLocation == ZSTD_split) /* first deplete literal buffer in dst, then copy litExtraBuffer */
- {
+ if (dctx->litBufferLocation == ZSTD_split) { /* first deplete literal buffer in dst, then copy litExtraBuffer */
size_t const lastLLSize = litBufferEnd - litPtr;
RETURN_ERROR_IF(lastLLSize > (size_t)(oend - op), dstSize_tooSmall, "");
if (op != NULL) {
@@ -1827,17 +1885,16 @@ ZSTD_decompressSequencesLong_body(
}
}
- return op-ostart;
+ return (size_t)(op - ostart);
}
static size_t
ZSTD_decompressSequencesLong_default(ZSTD_DCtx* dctx,
void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize, int nbSeq,
- const ZSTD_longOffset_e isLongOffset,
- const int frame)
+ const ZSTD_longOffset_e isLongOffset)
{
- return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+ return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
}
#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */
@@ -1851,20 +1908,18 @@ DONT_VECTORIZE
ZSTD_decompressSequences_bmi2(ZSTD_DCtx* dctx,
void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize, int nbSeq,
- const ZSTD_longOffset_e isLongOffset,
- const int frame)
+ const ZSTD_longOffset_e isLongOffset)
{
- return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+ return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
}
static BMI2_TARGET_ATTRIBUTE size_t
DONT_VECTORIZE
ZSTD_decompressSequencesSplitLitBuffer_bmi2(ZSTD_DCtx* dctx,
void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize, int nbSeq,
- const ZSTD_longOffset_e isLongOffset,
- const int frame)
+ const ZSTD_longOffset_e isLongOffset)
{
- return ZSTD_decompressSequences_bodySplitLitBuffer(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+ return ZSTD_decompressSequences_bodySplitLitBuffer(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
}
#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
@@ -1873,50 +1928,40 @@ static BMI2_TARGET_ATTRIBUTE size_t
ZSTD_decompressSequencesLong_bmi2(ZSTD_DCtx* dctx,
void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize, int nbSeq,
- const ZSTD_longOffset_e isLongOffset,
- const int frame)
+ const ZSTD_longOffset_e isLongOffset)
{
- return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+ return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
}
#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */
#endif /* DYNAMIC_BMI2 */
-typedef size_t (*ZSTD_decompressSequences_t)(
- ZSTD_DCtx* dctx,
- void* dst, size_t maxDstSize,
- const void* seqStart, size_t seqSize, int nbSeq,
- const ZSTD_longOffset_e isLongOffset,
- const int frame);
-
#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
static size_t
ZSTD_decompressSequences(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize, int nbSeq,
- const ZSTD_longOffset_e isLongOffset,
- const int frame)
+ const ZSTD_longOffset_e isLongOffset)
{
DEBUGLOG(5, "ZSTD_decompressSequences");
#if DYNAMIC_BMI2
if (ZSTD_DCtx_get_bmi2(dctx)) {
- return ZSTD_decompressSequences_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+ return ZSTD_decompressSequences_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
}
#endif
- return ZSTD_decompressSequences_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+ return ZSTD_decompressSequences_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
}
static size_t
ZSTD_decompressSequencesSplitLitBuffer(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize, int nbSeq,
- const ZSTD_longOffset_e isLongOffset,
- const int frame)
+ const ZSTD_longOffset_e isLongOffset)
{
DEBUGLOG(5, "ZSTD_decompressSequencesSplitLitBuffer");
#if DYNAMIC_BMI2
if (ZSTD_DCtx_get_bmi2(dctx)) {
- return ZSTD_decompressSequencesSplitLitBuffer_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+ return ZSTD_decompressSequencesSplitLitBuffer_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
}
#endif
- return ZSTD_decompressSequencesSplitLitBuffer_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+ return ZSTD_decompressSequencesSplitLitBuffer_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
}
#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
@@ -1931,69 +1976,114 @@ static size_t
ZSTD_decompressSequencesLong(ZSTD_DCtx* dctx,
void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize, int nbSeq,
- const ZSTD_longOffset_e isLongOffset,
- const int frame)
+ const ZSTD_longOffset_e isLongOffset)
{
DEBUGLOG(5, "ZSTD_decompressSequencesLong");
#if DYNAMIC_BMI2
if (ZSTD_DCtx_get_bmi2(dctx)) {
- return ZSTD_decompressSequencesLong_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+ return ZSTD_decompressSequencesLong_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
}
#endif
- return ZSTD_decompressSequencesLong_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+ return ZSTD_decompressSequencesLong_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
}
#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */
+/*
+ * @returns The total size of the history referenceable by zstd, including
+ * both the prefix and the extDict. At @p op any offset larger than this
+ * is invalid.
+ */
+static size_t ZSTD_totalHistorySize(BYTE* op, BYTE const* virtualStart)
+{
+ return (size_t)(op - virtualStart);
+}
+
+typedef struct {
+ unsigned longOffsetShare;
+ unsigned maxNbAdditionalBits;
+} ZSTD_OffsetInfo;
-#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
- !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
-/* ZSTD_getLongOffsetsShare() :
+/* ZSTD_getOffsetInfo() :
* condition : offTable must be valid
* @return : "share" of long offsets (arbitrarily defined as > (1<<23))
- * compared to maximum possible of (1<<OffFSELog) */
-static unsigned
-ZSTD_getLongOffsetsShare(const ZSTD_seqSymbol* offTable)
+ * compared to maximum possible of (1<<OffFSELog),
+ * as well as the maximum number of additional bits required.
+ */
+static ZSTD_OffsetInfo
+ZSTD_getOffsetInfo(const ZSTD_seqSymbol* offTable, int nbSeq)
{
- const void* ptr = offTable;
- U32 const tableLog = ((const ZSTD_seqSymbol_header*)ptr)[0].tableLog;
- const ZSTD_seqSymbol* table = offTable + 1;
- U32 const max = 1 << tableLog;
- U32 u, total = 0;
- DEBUGLOG(5, "ZSTD_getLongOffsetsShare: (tableLog=%u)", tableLog);
-
- assert(max <= (1 << OffFSELog)); /* max not too large */
- for (u=0; u<max; u++) {
- if (table[u].nbAdditionalBits > 22) total += 1;
+ ZSTD_OffsetInfo info = {0, 0};
+ /* If nbSeq == 0, then the offTable is uninitialized, but we have
+ * no sequences, so both values should be 0.
+ */
+ if (nbSeq != 0) {
+ const void* ptr = offTable;
+ U32 const tableLog = ((const ZSTD_seqSymbol_header*)ptr)[0].tableLog;
+ const ZSTD_seqSymbol* table = offTable + 1;
+ U32 const max = 1 << tableLog;
+ U32 u;
+ DEBUGLOG(5, "ZSTD_getOffsetInfo: (tableLog=%u)", tableLog);
+
+ assert(max <= (1 << OffFSELog)); /* max not too large */
+ for (u=0; u<max; u++) {
+ info.maxNbAdditionalBits = MAX(info.maxNbAdditionalBits, table[u].nbAdditionalBits);
+ if (table[u].nbAdditionalBits > 22) info.longOffsetShare += 1;
+ }
+
+ assert(tableLog <= OffFSELog);
+ info.longOffsetShare <<= (OffFSELog - tableLog); /* scale to OffFSELog */
}
- assert(tableLog <= OffFSELog);
- total <<= (OffFSELog - tableLog); /* scale to OffFSELog */
+ return info;
+}
- return total;
+/*
+ * @returns The maximum offset we can decode in one read of our bitstream, without
+ * reloading more bits in the middle of reading the offset bits. Any offsets larger
+ * than this must use the long offset decoder.
+ */
+static size_t ZSTD_maxShortOffset(void)
+{
+ if (MEM_64bits()) {
+ /* We can decode any offset without reloading bits.
+ * This might change if the max window size grows.
+ */
+ ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31);
+ return (size_t)-1;
+ } else {
+ /* The maximum offBase is (1 << (STREAM_ACCUMULATOR_MIN + 1)) - 1.
+ * This offBase would require STREAM_ACCUMULATOR_MIN extra bits.
+ * Then we have to subtract ZSTD_REP_NUM to get the maximum possible offset.
+ */
+ size_t const maxOffbase = ((size_t)1 << (STREAM_ACCUMULATOR_MIN + 1)) - 1;
+ size_t const maxOffset = maxOffbase - ZSTD_REP_NUM;
+ assert(ZSTD_highbit32((U32)maxOffbase) == STREAM_ACCUMULATOR_MIN);
+ return maxOffset;
+ }
}
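
For concreteness, a hedged worked example of the 32-bit branch above, assuming the values used elsewhere in lib/zstd (STREAM_ACCUMULATOR_MIN_32 == 25 and ZSTD_REP_NUM == 3); illustration only, not part of the patch:

#include <assert.h>

/* Sketch: reproduces the 32-bit arithmetic of ZSTD_maxShortOffset(). */
static void max_short_offset_example(void)
{
	const unsigned stream_accumulator_min = 25; /* assumed STREAM_ACCUMULATOR_MIN_32 */
	const unsigned rep_num = 3;                 /* assumed ZSTD_REP_NUM */
	size_t const maxOffbase = ((size_t)1 << (stream_accumulator_min + 1)) - 1;
	size_t const maxOffset = maxOffbase - rep_num;

	assert(maxOffbase == 67108863u); /* (1 << 26) - 1 */
	assert(maxOffset == 67108860u);  /* anything beyond ~64 MiB needs the long-offset path */
}
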
-#endif
size_t
ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
void* dst, size_t dstCapacity,
- const void* src, size_t srcSize, const int frame, const streaming_operation streaming)
+ const void* src, size_t srcSize, const streaming_operation streaming)
{ /* blockType == blockCompressed */
const BYTE* ip = (const BYTE*)src;
- /* isLongOffset must be true if there are long offsets.
- * Offsets are long if they are larger than 2^STREAM_ACCUMULATOR_MIN.
- * We don't expect that to be the case in 64-bit mode.
- * In block mode, window size is not known, so we have to be conservative.
- * (note: but it could be evaluated from current-lowLimit)
- */
- ZSTD_longOffset_e const isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (!frame || (dctx->fParams.windowSize > (1ULL << STREAM_ACCUMULATOR_MIN))));
- DEBUGLOG(5, "ZSTD_decompressBlock_internal (size : %u)", (U32)srcSize);
-
- RETURN_ERROR_IF(srcSize >= ZSTD_BLOCKSIZE_MAX, srcSize_wrong, "");
+ DEBUGLOG(5, "ZSTD_decompressBlock_internal (cSize : %u)", (unsigned)srcSize);
+
+ /* Note : the wording of the specification
+ * allows a compressed block to be sized exactly ZSTD_blockSizeMax(dctx).
+ * This generally does not happen, as it makes little sense,
+ * since an uncompressed block would feature the same size and have no decompression cost.
+ * Also, note that decoders from reference libzstd prior to v1.5.4
+ * would consider this edge case an error.
+ * As a consequence, avoid generating compressed blocks of size ZSTD_blockSizeMax(dctx)
+ * for broader compatibility with the deployed ecosystem of zstd decoders */
+ RETURN_ERROR_IF(srcSize > ZSTD_blockSizeMax(dctx), srcSize_wrong, "");
/* Decode literals section */
{ size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize, dst, dstCapacity, streaming);
- DEBUGLOG(5, "ZSTD_decodeLiteralsBlock : %u", (U32)litCSize);
+ DEBUGLOG(5, "ZSTD_decodeLiteralsBlock : cSize=%u, nbLiterals=%zu", (U32)litCSize, dctx->litSize);
if (ZSTD_isError(litCSize)) return litCSize;
ip += litCSize;
srcSize -= litCSize;
@@ -2001,6 +2091,23 @@ ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
/* Build Decoding Tables */
{
+ /* Compute the maximum block size, which must also work when !frame and fParams are unset.
+ * Additionally, take the min with dstCapacity to ensure that the totalHistorySize fits in a size_t.
+ */
+ size_t const blockSizeMax = MIN(dstCapacity, ZSTD_blockSizeMax(dctx));
+ size_t const totalHistorySize = ZSTD_totalHistorySize(ZSTD_maybeNullPtrAdd((BYTE*)dst, blockSizeMax), (BYTE const*)dctx->virtualStart);
+ /* isLongOffset must be true if there are long offsets.
+ * Offsets are long if they are larger than ZSTD_maxShortOffset().
+ * We don't expect that to be the case in 64-bit mode.
+ *
+ * We check here to see if our history is large enough to allow long offsets.
+ * If it isn't, then we can't possibly have (valid) long offsets. If the offset
+ * is invalid, then it is okay to read it incorrectly.
+ *
+ * If isLongOffset is true, then we will later check our decoding table to see
+ * if it is even possible to generate long offsets.
+ */
+ ZSTD_longOffset_e isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (totalHistorySize > ZSTD_maxShortOffset()));
/* These macros control at build-time which decompressor implementation
* we use. If neither is defined, we do some inspection and dispatch at
* runtime.
@@ -2008,6 +2115,11 @@ ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
!defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
int usePrefetchDecoder = dctx->ddictIsCold;
+#else
+ /* Set to 1 to avoid computing offset info if we don't need to.
+ * Otherwise this value is ignored.
+ */
+ int usePrefetchDecoder = 1;
#endif
int nbSeq;
size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, srcSize);
@@ -2015,40 +2127,55 @@ ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
ip += seqHSize;
srcSize -= seqHSize;
- RETURN_ERROR_IF(dst == NULL && nbSeq > 0, dstSize_tooSmall, "NULL not handled");
+ RETURN_ERROR_IF((dst == NULL || dstCapacity == 0) && nbSeq > 0, dstSize_tooSmall, "NULL not handled");
+ RETURN_ERROR_IF(MEM_64bits() && sizeof(size_t) == sizeof(void*) && (size_t)(-1) - (size_t)dst < (size_t)(1 << 20), dstSize_tooSmall,
+ "invalid dst");
-#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
- !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
- if ( !usePrefetchDecoder
- && (!frame || (dctx->fParams.windowSize > (1<<24)))
- && (nbSeq>ADVANCED_SEQS) ) { /* could probably use a larger nbSeq limit */
- U32 const shareLongOffsets = ZSTD_getLongOffsetsShare(dctx->OFTptr);
- U32 const minShare = MEM_64bits() ? 7 : 20; /* heuristic values, correspond to 2.73% and 7.81% */
- usePrefetchDecoder = (shareLongOffsets >= minShare);
+ /* If we could potentially have long offsets, or we might want to use the prefetch decoder,
+ * compute information about the share of long offsets, and the maximum nbAdditionalBits.
+ * NOTE: could probably use a larger nbSeq limit
+ */
+ if (isLongOffset || (!usePrefetchDecoder && (totalHistorySize > (1u << 24)) && (nbSeq > 8))) {
+ ZSTD_OffsetInfo const info = ZSTD_getOffsetInfo(dctx->OFTptr, nbSeq);
+ if (isLongOffset && info.maxNbAdditionalBits <= STREAM_ACCUMULATOR_MIN) {
+ /* If isLongOffset, but the maximum number of additional bits that we see in our table is small
+ * enough, then we know it is impossible to have too long an offset in this block, so we can
+ * use the regular offset decoder.
+ */
+ isLongOffset = ZSTD_lo_isRegularOffset;
+ }
+ if (!usePrefetchDecoder) {
+ U32 const minShare = MEM_64bits() ? 7 : 20; /* heuristic values, correspond to 2.73% and 7.81% */
+ usePrefetchDecoder = (info.longOffsetShare >= minShare);
+ }
}
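
As a side note on the minShare heuristic just above: assuming OffFSELog == 8, as in upstream zstd, longOffsetShare is scaled to a denominator of 1 << 8 = 256, so the thresholds 7 and 20 correspond to 7/256 ≈ 2.73% and 20/256 ≈ 7.81%. A tiny illustrative helper (not part of the patch):

/* Returns the share as a percentage scaled by 100, e.g. 7 -> 273 (2.73%). */
static unsigned long_offset_share_pct_x100(unsigned longOffsetShare)
{
	return (longOffsetShare * 10000u) >> 8; /* assumes OffFSELog == 8 */
}
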
-#endif
dctx->ddictIsCold = 0;
#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
!defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
- if (usePrefetchDecoder)
+ if (usePrefetchDecoder) {
+#else
+ (void)usePrefetchDecoder;
+ {
#endif
#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
- return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame);
+ return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset);
#endif
+ }
#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
/* else */
if (dctx->litBufferLocation == ZSTD_split)
- return ZSTD_decompressSequencesSplitLitBuffer(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame);
+ return ZSTD_decompressSequencesSplitLitBuffer(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset);
else
- return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame);
+ return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset);
#endif
}
}
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst, size_t dstSize)
{
if (dst != dctx->previousDstEnd && dstSize > 0) { /* not contiguous */
@@ -2060,13 +2187,24 @@ void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst, size_t dstSize)
}
-size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx,
- void* dst, size_t dstCapacity,
- const void* src, size_t srcSize)
+size_t ZSTD_decompressBlock_deprecated(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
{
size_t dSize;
+ dctx->isFrameDecompression = 0;
ZSTD_checkContinuity(dctx, dst, dstCapacity);
- dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 0, not_streaming);
+ dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, not_streaming);
+ FORWARD_IF_ERROR(dSize, "");
dctx->previousDstEnd = (char*)dst + dSize;
return dSize;
}
+
+
+/* NOTE: Must just wrap ZSTD_decompressBlock_deprecated() */
+size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
+{
+ return ZSTD_decompressBlock_deprecated(dctx, dst, dstCapacity, src, srcSize);
+}
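
The indirection above is the usual pattern for deprecating a public entry point without having the library warn on itself: the public symbol carries the deprecation attribute in the headers, while internal callers use the undeprecated *_deprecated name. A generic sketch of the pattern, with purely illustrative names:

#include <stddef.h>

/* public header (sketch): the public name carries the deprecation attribute */
__attribute__((deprecated("use the frame-level API")))
size_t my_decompress_block(void *dst, size_t dstCapacity, const void *src, size_t srcSize);

/* internal name (sketch): same contract, no attribute, so internal callers stay warning-free */
size_t my_decompress_block_deprecated(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
{
	(void)dst; (void)dstCapacity; (void)src; (void)srcSize;
	return 0; /* placeholder body */
}

/* the public symbol must just wrap the internal one */
size_t my_decompress_block(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
{
	return my_decompress_block_deprecated(dst, dstCapacity, src, srcSize);
}
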
diff --git a/lib/zstd/decompress/zstd_decompress_block.h b/lib/zstd/decompress/zstd_decompress_block.h
index 3d2d57a5d25a..becffbd89364 100644
--- a/lib/zstd/decompress/zstd_decompress_block.h
+++ b/lib/zstd/decompress/zstd_decompress_block.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -47,7 +48,7 @@ typedef enum {
*/
size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
void* dst, size_t dstCapacity,
- const void* src, size_t srcSize, const int frame, const streaming_operation streaming);
+ const void* src, size_t srcSize, const streaming_operation streaming);
/* ZSTD_buildFSETable() :
* generate FSE decoding table for one symbol (ll, ml or off)
@@ -64,5 +65,10 @@ void ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
unsigned tableLog, void* wksp, size_t wkspSize,
int bmi2);
+/* Internal definition of ZSTD_decompressBlock() to avoid deprecation warnings. */
+size_t ZSTD_decompressBlock_deprecated(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize);
+
#endif /* ZSTD_DEC_BLOCK_H */
diff --git a/lib/zstd/decompress/zstd_decompress_internal.h b/lib/zstd/decompress/zstd_decompress_internal.h
index 98102edb6a83..2a225d1811c4 100644
--- a/lib/zstd/decompress/zstd_decompress_internal.h
+++ b/lib/zstd/decompress/zstd_decompress_internal.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -75,12 +76,13 @@ static UNUSED_ATTR const U32 ML_base[MaxML+1] = {
#define ZSTD_BUILD_FSE_TABLE_WKSP_SIZE (sizeof(S16) * (MaxSeq + 1) + (1u << MaxFSELog) + sizeof(U64))
#define ZSTD_BUILD_FSE_TABLE_WKSP_SIZE_U32 ((ZSTD_BUILD_FSE_TABLE_WKSP_SIZE + sizeof(U32) - 1) / sizeof(U32))
+#define ZSTD_HUFFDTABLE_CAPACITY_LOG 12
typedef struct {
ZSTD_seqSymbol LLTable[SEQSYMBOL_TABLE_SIZE(LLFSELog)]; /* Note : Space reserved for FSE Tables */
ZSTD_seqSymbol OFTable[SEQSYMBOL_TABLE_SIZE(OffFSELog)]; /* is also used as temporary workspace while building hufTable during DDict creation */
ZSTD_seqSymbol MLTable[SEQSYMBOL_TABLE_SIZE(MLFSELog)]; /* and therefore must be at least HUF_DECOMPRESS_WORKSPACE_SIZE large */
- HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)]; /* can accommodate HUF_decompress4X */
+ HUF_DTable hufTable[HUF_DTABLE_SIZE(ZSTD_HUFFDTABLE_CAPACITY_LOG)]; /* can accommodate HUF_decompress4X */
U32 rep[ZSTD_REP_NUM];
U32 workspace[ZSTD_BUILD_FSE_TABLE_WKSP_SIZE_U32];
} ZSTD_entropyDTables_t;
@@ -135,7 +137,7 @@ struct ZSTD_DCtx_s
const void* virtualStart; /* virtual start of previous segment if it was just before current one */
const void* dictEnd; /* end of previous segment */
size_t expected;
- ZSTD_frameHeader fParams;
+ ZSTD_FrameHeader fParams;
U64 processedCSize;
U64 decodedSize;
blockType_e bType; /* used in ZSTD_decompressContinue(), store blockType between block header decoding and block decompression stages */
@@ -152,7 +154,8 @@ struct ZSTD_DCtx_s
size_t litSize;
size_t rleSize;
size_t staticSize;
-#if DYNAMIC_BMI2 != 0
+ int isFrameDecompression;
+#if DYNAMIC_BMI2
int bmi2; /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */
#endif
@@ -164,6 +167,8 @@ struct ZSTD_DCtx_s
ZSTD_dictUses_e dictUses;
ZSTD_DDictHashSet* ddictSet; /* Hash set for multiple ddicts */
ZSTD_refMultipleDDicts_e refMultipleDDicts; /* User specified: if == 1, will allow references to multiple DDicts. Default == 0 (disabled) */
+ int disableHufAsm;
+ int maxBlockSizeParam;
/* streaming */
ZSTD_dStreamStage streamStage;
@@ -199,11 +204,11 @@ struct ZSTD_DCtx_s
}; /* typedef'd to ZSTD_DCtx within "zstd.h" */
MEM_STATIC int ZSTD_DCtx_get_bmi2(const struct ZSTD_DCtx_s *dctx) {
-#if DYNAMIC_BMI2 != 0
- return dctx->bmi2;
+#if DYNAMIC_BMI2
+ return dctx->bmi2;
#else
(void)dctx;
- return 0;
+ return 0;
#endif
}
diff --git a/lib/zstd/decompress_sources.h b/lib/zstd/decompress_sources.h
index a06ca187aab5..8a47eb2a4514 100644
--- a/lib/zstd/decompress_sources.h
+++ b/lib/zstd/decompress_sources.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
diff --git a/lib/zstd/zstd_common_module.c b/lib/zstd/zstd_common_module.c
index 22686e367e6f..466828e35752 100644
--- a/lib/zstd/zstd_common_module.c
+++ b/lib/zstd/zstd_common_module.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
- * Copyright (c) Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -24,9 +24,6 @@ EXPORT_SYMBOL_GPL(HUF_readStats_wksp);
EXPORT_SYMBOL_GPL(ZSTD_isError);
EXPORT_SYMBOL_GPL(ZSTD_getErrorName);
EXPORT_SYMBOL_GPL(ZSTD_getErrorCode);
-EXPORT_SYMBOL_GPL(ZSTD_customMalloc);
-EXPORT_SYMBOL_GPL(ZSTD_customCalloc);
-EXPORT_SYMBOL_GPL(ZSTD_customFree);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Zstd Common");
diff --git a/lib/zstd/zstd_compress_module.c b/lib/zstd/zstd_compress_module.c
index bd8784449b31..7651b53551c8 100644
--- a/lib/zstd/zstd_compress_module.c
+++ b/lib/zstd/zstd_compress_module.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
- * Copyright (c) Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -16,6 +16,7 @@
#include "common/zstd_deps.h"
#include "common/zstd_internal.h"
+#include "compress/zstd_compress_internal.h"
#define ZSTD_FORWARD_IF_ERR(ret) \
do { \
@@ -92,12 +93,64 @@ zstd_compression_parameters zstd_get_cparams(int level,
}
EXPORT_SYMBOL(zstd_get_cparams);
+size_t zstd_cctx_set_param(zstd_cctx *cctx, ZSTD_cParameter param, int value)
+{
+ return ZSTD_CCtx_setParameter(cctx, param, value);
+}
+EXPORT_SYMBOL(zstd_cctx_set_param);
+
size_t zstd_cctx_workspace_bound(const zstd_compression_parameters *cparams)
{
return ZSTD_estimateCCtxSize_usingCParams(*cparams);
}
EXPORT_SYMBOL(zstd_cctx_workspace_bound);
+// Used by zstd_cctx_workspace_bound_with_ext_seq_prod()
+static size_t dummy_external_sequence_producer(
+ void *sequenceProducerState,
+ ZSTD_Sequence *outSeqs, size_t outSeqsCapacity,
+ const void *src, size_t srcSize,
+ const void *dict, size_t dictSize,
+ int compressionLevel,
+ size_t windowSize)
+{
+ (void)sequenceProducerState;
+ (void)outSeqs; (void)outSeqsCapacity;
+ (void)src; (void)srcSize;
+ (void)dict; (void)dictSize;
+ (void)compressionLevel;
+ (void)windowSize;
+ return ZSTD_SEQUENCE_PRODUCER_ERROR;
+}
+
+static void init_cctx_params_from_compress_params(
+ ZSTD_CCtx_params *cctx_params,
+ const zstd_compression_parameters *compress_params)
+{
+ ZSTD_parameters zstd_params;
+ memset(&zstd_params, 0, sizeof(zstd_params));
+ zstd_params.cParams = *compress_params;
+ ZSTD_CCtxParams_init_advanced(cctx_params, zstd_params);
+}
+
+size_t zstd_cctx_workspace_bound_with_ext_seq_prod(const zstd_compression_parameters *compress_params)
+{
+ ZSTD_CCtx_params cctx_params;
+ init_cctx_params_from_compress_params(&cctx_params, compress_params);
+ ZSTD_CCtxParams_registerSequenceProducer(&cctx_params, NULL, dummy_external_sequence_producer);
+ return ZSTD_estimateCCtxSize_usingCCtxParams(&cctx_params);
+}
+EXPORT_SYMBOL(zstd_cctx_workspace_bound_with_ext_seq_prod);
+
+size_t zstd_cstream_workspace_bound_with_ext_seq_prod(const zstd_compression_parameters *compress_params)
+{
+ ZSTD_CCtx_params cctx_params;
+ init_cctx_params_from_compress_params(&cctx_params, compress_params);
+ ZSTD_CCtxParams_registerSequenceProducer(&cctx_params, NULL, dummy_external_sequence_producer);
+ return ZSTD_estimateCStreamSize_usingCCtxParams(&cctx_params);
+}
+EXPORT_SYMBOL(zstd_cstream_workspace_bound_with_ext_seq_prod);
+
zstd_cctx *zstd_init_cctx(void *workspace, size_t workspace_size)
{
if (workspace == NULL)
@@ -209,5 +262,25 @@ size_t zstd_end_stream(zstd_cstream *cstream, zstd_out_buffer *output)
}
EXPORT_SYMBOL(zstd_end_stream);
+void zstd_register_sequence_producer(
+ zstd_cctx *cctx,
+ void* sequence_producer_state,
+ zstd_sequence_producer_f sequence_producer
+) {
+ ZSTD_registerSequenceProducer(cctx, sequence_producer_state, sequence_producer);
+}
+EXPORT_SYMBOL(zstd_register_sequence_producer);
+
+size_t zstd_compress_sequences_and_literals(zstd_cctx *cctx, void* dst, size_t dst_capacity,
+ const zstd_sequence *in_seqs, size_t in_seqs_size,
+ const void* literals, size_t lit_size, size_t lit_capacity,
+ size_t decompressed_size)
+{
+ return ZSTD_compressSequencesAndLiterals(cctx, dst, dst_capacity, in_seqs,
+ in_seqs_size, literals, lit_size,
+ lit_capacity, decompressed_size);
+}
+EXPORT_SYMBOL(zstd_compress_sequences_and_literals);
+
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Zstd Compressor");
diff --git a/lib/zstd/zstd_decompress_module.c b/lib/zstd/zstd_decompress_module.c
index 469fc3059be0..0ae819f0c927 100644
--- a/lib/zstd/zstd_decompress_module.c
+++ b/lib/zstd/zstd_decompress_module.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
- * Copyright (c) Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -113,7 +113,7 @@ EXPORT_SYMBOL(zstd_init_dstream);
size_t zstd_reset_dstream(zstd_dstream *dstream)
{
- return ZSTD_resetDStream(dstream);
+ return ZSTD_DCtx_reset(dstream, ZSTD_reset_session_only);
}
EXPORT_SYMBOL(zstd_reset_dstream);
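
The switch to ZSTD_DCtx_reset(dstream, ZSTD_reset_session_only) keeps the behavior callers rely on: parameters (and an attached dictionary, if any) are kept, only per-frame session state is cleared. A hedged sketch of the typical reuse pattern, assuming one whole frame per call; zstd_decompress_stream(), zstd_is_error() and the buffer types are the existing kernel API, while the function itself is illustrative:

static size_t my_decompress_one_frame(zstd_dstream *ds,
				      void *dst, size_t dst_size,
				      const void *src, size_t src_size)
{
	zstd_in_buffer in = { .src = src, .size = src_size, .pos = 0 };
	zstd_out_buffer out = { .dst = dst, .size = dst_size, .pos = 0 };
	size_t ret;

	zstd_reset_dstream(ds); /* keeps parameters, clears per-frame state */
	ret = zstd_decompress_stream(ds, &out, &in);
	if (zstd_is_error(ret))
		return ret;
	return out.pos; /* bytes written; ret == 0 means the frame is complete */
}
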