From 070a49e060f7f10bafbdfcf3c740d3d69ec96d05 Mon Sep 17 00:00:00 2001 From: ssengupta Date: Tue, 26 Jan 2021 17:27:14 +0000 Subject: [PATCH] Bug 1340901 - Update Snappy to version 1.1.8. r=dom-workers-and-storage-reviewers,asuth Add a static assertion in IndexedDB to detect future updates. Differential Revision: https://phabricator.services.mozilla.com/D56708 --- dom/indexedDB/ActorsParentCommon.cpp | 2 + other-licenses/snappy/README | 18 +- other-licenses/snappy/snappy-stubs-public.h | 22 +- other-licenses/snappy/src/CONTRIBUTING.md | 26 + other-licenses/snappy/src/ChangeLog | 2468 ----------------- other-licenses/snappy/src/NEWS | 48 + .../snappy/src/{README => README.md} | 45 +- other-licenses/snappy/src/snappy-internal.h | 83 +- .../snappy/src/snappy-stubs-internal.cc | 2 +- .../snappy/src/snappy-stubs-internal.h | 154 +- .../snappy/src/snappy-stubs-public.h.in | 66 +- other-licenses/snappy/src/snappy-test.cc | 48 +- other-licenses/snappy/src/snappy-test.h | 122 +- other-licenses/snappy/src/snappy.cc | 835 ++++-- other-licenses/snappy/src/snappy.h | 24 +- .../snappy/src/snappy_compress_fuzzer.cc | 59 + .../snappy/src/snappy_uncompress_fuzzer.cc | 57 + other-licenses/snappy/src/snappy_unittest.cc | 507 ++-- 18 files changed, 1280 insertions(+), 3306 deletions(-) create mode 100644 other-licenses/snappy/src/CONTRIBUTING.md delete mode 100644 other-licenses/snappy/src/ChangeLog rename other-licenses/snappy/src/{README => README.md} (86%) create mode 100644 other-licenses/snappy/src/snappy_compress_fuzzer.cc create mode 100644 other-licenses/snappy/src/snappy_uncompress_fuzzer.cc diff --git a/dom/indexedDB/ActorsParentCommon.cpp b/dom/indexedDB/ActorsParentCommon.cpp index 724cf9ef22d3..7068b38635b6 100644 --- a/dom/indexedDB/ActorsParentCommon.cpp +++ b/dom/indexedDB/ActorsParentCommon.cpp @@ -61,6 +61,8 @@ class nsIFile; namespace mozilla::dom::indexedDB { +static_assert(SNAPPY_VERSION == 0x010108); + using mozilla::ipc::IsOnBackgroundThread; namespace { diff 
--git a/other-licenses/snappy/README b/other-licenses/snappy/README index c0e7d4296046..357e1dd829ae 100644 --- a/other-licenses/snappy/README +++ b/other-licenses/snappy/README @@ -4,23 +4,21 @@ Mozilla does not modify the actual snappy source with the exception of the 'snappy-stubs-public.h' header. We have replaced its build system with our own. Snappy comes from: - http://code.google.com/p/snappy/ + https://github.com/google/snappy -We are currently using revision: 114 +We are currently using revision: 1.1.8 To upgrade to a newer version: 1. Check out the new code using subversion. 2. Update 'snappy-stubs-public.h' in this directory with any changes that were made to 'snappy-stubs-public.h.in' in the new source. - 3. Copy the major/minor/patch versions from 'configure.ac' into + 3. Copy the major/minor/patch versions from 'CMakeLists.txt' into 'snappy-stubs-public.h'. 4. Copy all source files from the new version into the src subdirectory. The - following files are not needed: - - 'autom4te.cache' subdirectory - - 'm4' subdirectory + following are not needed: + - 'CMakeLists.txt' file + - 'cmake' subdirectory + - 'docs' subdirectory - 'testdata' subdirectory - - 'autogen.sh' - - 'configure.ac' - - 'Makefile.am' - - 'snappy.pc.in' 5. Update the revision stamp in this file. 
+ diff --git a/other-licenses/snappy/snappy-stubs-public.h b/other-licenses/snappy/snappy-stubs-public.h index 18bdb14f46e0..3141fcddcfb8 100644 --- a/other-licenses/snappy/snappy-stubs-public.h +++ b/other-licenses/snappy/snappy-stubs-public.h @@ -39,14 +39,14 @@ #include #if defined IS_BIG_ENDIAN || defined __BIG_ENDIAN__ -#define WORDS_BIGENDIAN +# define WORDS_BIGENDIAN #endif #define SNAPPY_MAJOR 1 #define SNAPPY_MINOR 1 -#define SNAPPY_PATCHLEVEL 3 +#define SNAPPY_PATCHLEVEL 8 #define SNAPPY_VERSION \ - ((SNAPPY_MAJOR << 16) | (SNAPPY_MINOR << 8) | SNAPPY_PATCHLEVEL) + ((SNAPPY_MAJOR << 16) | (SNAPPY_MINOR << 8) | SNAPPY_PATCHLEVEL) #include @@ -64,22 +64,22 @@ typedef uint64_t uint64; typedef std::string string; #ifndef DISALLOW_COPY_AND_ASSIGN -#define DISALLOW_COPY_AND_ASSIGN(TypeName) \ - TypeName(const TypeName&); \ - void operator=(const TypeName&) +# define DISALLOW_COPY_AND_ASSIGN(TypeName) \ + TypeName(const TypeName&); \ + void operator=(const TypeName&) #endif struct iovec { - void* iov_base; - size_t iov_len; + void* iov_base; + size_t iov_len; }; #if defined(_WIN32) || defined(_WIN64) -#if defined(_WIN64) +# if defined(_WIN64) typedef __int64 LONG_PTR; -#else +# else typedef long LONG_PTR; -#endif +# endif typedef LONG_PTR SSIZE_T; typedef SSIZE_T ssize_t; #endif diff --git a/other-licenses/snappy/src/CONTRIBUTING.md b/other-licenses/snappy/src/CONTRIBUTING.md new file mode 100644 index 000000000000..c7b84516c2f9 --- /dev/null +++ b/other-licenses/snappy/src/CONTRIBUTING.md @@ -0,0 +1,26 @@ +# How to Contribute + +We'd love to accept your patches and contributions to this project. There are +just a few small guidelines you need to follow. + +## Contributor License Agreement + +Contributions to this project must be accompanied by a Contributor License +Agreement. You (or your employer) retain the copyright to your contribution, +this simply gives us permission to use and redistribute your contributions as +part of the project. 
Head over to to see +your current agreements on file or to sign a new one. + +You generally only need to submit a CLA once, so if you've already submitted one +(even if it was for a different project), you probably don't need to do it +again. + +## Code reviews + +All submissions, including submissions by project members, require review. We +use GitHub pull requests for this purpose. Consult +[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more +information on using pull requests. + +Please make sure that all the automated checks (CLA, AppVeyor, Travis) pass for +your pull requests. Pull requests whose checks fail may be ignored. diff --git a/other-licenses/snappy/src/ChangeLog b/other-licenses/snappy/src/ChangeLog deleted file mode 100644 index 1478db55014b..000000000000 --- a/other-licenses/snappy/src/ChangeLog +++ /dev/null @@ -1,2468 +0,0 @@ -commit eb66d8176b3d1f560ee012e1b488cb1540c45f88 -Author: Steinar H. Gunderson -Date: Mon Jun 22 16:10:47 2015 +0200 - - Initialized members of SnappyArrayWriter and SnappyDecompressionValidator. - These members were almost surely initialized before use by other member - functions, but Coverity was warning about this. Eliminating these warnings - minimizes clutter in that report and the likelihood of overlooking a real bug. - - A=cmumford - R=jeff - -commit b2312c4c25883ab03b5110f1b006dce95f419a4f -Author: Steinar H. Gunderson -Date: Mon Jun 22 16:03:28 2015 +0200 - - Add support for Uncompress(source, sink). Various changes to allow - Uncompress(source, sink) to get the same performance as the different - variants of Uncompress to Cord/DataBuffer/String/FlatBuffer. - - Changes to efficiently support Uncompress(source, sink) - -------- - - a) For strings - we add support to StringByteSink to do GetAppendBuffer so we - can write to it without copying. - b) For flat array buffers, we do GetAppendBuffer and see if we can get a full buffer. 
- - With the above changes we get performance with ByteSource/ByteSink - that is very close to directly using flat arrays and strings. - - We add various benchmark cases to demonstrate that. - - Orthogonal change - ------------------ - - Add support for TryFastAppend() for SnappyScatteredWriter. - - Benchmark results are below - - CPU: Intel Core2 dL1:32KB dL2:4096KB - Benchmark Time(ns) CPU(ns) Iterations - ----------------------------------------------------- - BM_UFlat/0 109065 108996 6410 896.0MB/s html - BM_UFlat/1 1012175 1012343 691 661.4MB/s urls - BM_UFlat/2 26775 26771 26149 4.4GB/s jpg - BM_UFlat/3 48947 48940 14363 1.8GB/s pdf - BM_UFlat/4 441029 440835 1589 886.1MB/s html4 - BM_UFlat/5 39861 39880 17823 588.3MB/s cp - BM_UFlat/6 18315 18300 38126 581.1MB/s c - BM_UFlat/7 5254 5254 100000 675.4MB/s lsp - BM_UFlat/8 1568060 1567376 447 626.6MB/s xls - BM_UFlat/9 337512 337734 2073 429.5MB/s txt1 - BM_UFlat/10 287269 287054 2434 415.9MB/s txt2 - BM_UFlat/11 890098 890219 787 457.2MB/s txt3 - BM_UFlat/12 1186593 1186863 590 387.2MB/s txt4 - BM_UFlat/13 573927 573318 1000 853.7MB/s bin - BM_UFlat/14 64250 64294 10000 567.2MB/s sum - BM_UFlat/15 7301 7300 96153 552.2MB/s man - BM_UFlat/16 109617 109636 6375 1031.5MB/s pb - BM_UFlat/17 364438 364497 1921 482.3MB/s gaviota - BM_UFlatSink/0 108518 108465 6450 900.4MB/s html - BM_UFlatSink/1 991952 991997 705 675.0MB/s urls - BM_UFlatSink/2 26815 26798 26065 4.4GB/s jpg - BM_UFlatSink/3 49127 49122 14255 1.8GB/s pdf - BM_UFlatSink/4 436674 436731 1604 894.4MB/s html4 - BM_UFlatSink/5 39738 39733 17345 590.5MB/s cp - BM_UFlatSink/6 18413 18416 37962 577.4MB/s c - BM_UFlatSink/7 5677 5676 100000 625.2MB/s lsp - BM_UFlatSink/8 1552175 1551026 451 633.2MB/s xls - BM_UFlatSink/9 338526 338489 2065 428.5MB/s txt1 - BM_UFlatSink/10 289387 289307 2420 412.6MB/s txt2 - BM_UFlatSink/11 893803 893706 783 455.4MB/s txt3 - BM_UFlatSink/12 1195919 1195459 586 384.4MB/s txt4 - BM_UFlatSink/13 559637 559779 1000 874.3MB/s bin - 
BM_UFlatSink/14 65073 65094 10000 560.2MB/s sum - BM_UFlatSink/15 7618 7614 92823 529.5MB/s man - BM_UFlatSink/16 110085 110121 6352 1027.0MB/s pb - BM_UFlatSink/17 369196 368915 1896 476.5MB/s gaviota - BM_UValidate/0 46954 46957 14899 2.0GB/s html - BM_UValidate/1 500621 500868 1000 1.3GB/s urls - BM_UValidate/2 283 283 2481447 417.2GB/s jpg - BM_UValidate/3 16230 16228 43137 5.4GB/s pdf - BM_UValidate/4 189129 189193 3701 2.0GB/s html4 - - A=uday - R=sanjay - -commit b2ad96006741d40935db2f73194a3e489b467338 -Author: Steinar H. Gunderson -Date: Mon Jun 22 15:48:29 2015 +0200 - - Changes to eliminate compiler warnings on MSVC - - This code was not compiling under Visual Studio 2013 with warnings being treated - as errors. Specifically: - - 1. Changed int -> size_t to eliminate signed/unsigned mismatch warning. - 2. Added some missing return values to functions. - 3. Inserting character instead of integer literals into strings to avoid type - conversions. - - A=cmumford - R=jeff - -commit e7a897e187e90b33f87bd9e64872cf561de9ebca -Author: Steinar H. Gunderson -Date: Mon Jun 22 15:45:11 2015 +0200 - - Fixed unit tests to compile under MSVC. - - 1. Including config.h in test. - 2. Including windows.h before zippy-test.h. - 3. Removed definition of WIN32_LEAN_AND_MEAN. This caused problems in - build environments that define WIN32_LEAN_AND_MEAN as our - definition didn't check for prior existence. This constant is old - and no longer needed anyhow. - 4. Disable MSVC warning 4722 since ~LogMessageCrash() never returns. - - A=cmumford - R=jeff - -commit 86eb8b152bdb065ad11bf331a9f7d65b72616acf -Author: Steinar H. Gunderson -Date: Mon Jun 22 15:41:30 2015 +0200 - - Change a few branch annotations that profiling found to be wrong. - Overall performance is neutral or slightly positive. 
- - Westmere (64-bit, opt): - - Benchmark Base (ns) New (ns) Improvement - -------------------------------------------------------------------------------------- - BM_UFlat/0 73798 71464 1.3GB/s html +3.3% - BM_UFlat/1 715223 704318 953.5MB/s urls +1.5% - BM_UFlat/2 8137 8871 13.0GB/s jpg -8.3% - BM_UFlat/3 200 204 935.5MB/s jpg_200 -2.0% - BM_UFlat/4 21627 21281 4.5GB/s pdf +1.6% - BM_UFlat/5 302806 290350 1.3GB/s html4 +4.3% - BM_UFlat/6 218920 219017 664.1MB/s txt1 -0.0% - BM_UFlat/7 190437 191212 626.1MB/s txt2 -0.4% - BM_UFlat/8 584192 580484 703.4MB/s txt3 +0.6% - BM_UFlat/9 776537 779055 591.6MB/s txt4 -0.3% - BM_UFlat/10 76056 72606 1.5GB/s pb +4.8% - BM_UFlat/11 235962 239043 737.4MB/s gaviota -1.3% - BM_UFlat/12 28049 28000 840.1MB/s cp +0.2% - BM_UFlat/13 12225 12021 886.9MB/s c +1.7% - BM_UFlat/14 3362 3544 1004.0MB/s lsp -5.1% - BM_UFlat/15 937015 939206 1048.9MB/s xls -0.2% - BM_UFlat/16 236 233 823.1MB/s xls_200 +1.3% - BM_UFlat/17 373170 361947 1.3GB/s bin +3.1% - BM_UFlat/18 264 264 725.5MB/s bin_200 +0.0% - BM_UFlat/19 42834 43577 839.2MB/s sum -1.7% - BM_UFlat/20 4770 4736 853.6MB/s man +0.7% - BM_UValidate/0 39671 39944 2.4GB/s html -0.7% - BM_UValidate/1 443391 443391 1.5GB/s urls +0.0% - BM_UValidate/2 163 163 703.3GB/s jpg +0.0% - BM_UValidate/3 113 112 1.7GB/s jpg_200 +0.9% - BM_UValidate/4 7555 7608 12.6GB/s pdf -0.7% - BM_ZFlat/0 157616 157568 621.5MB/s html (22.31 %) +0.0% - BM_ZFlat/1 1997290 2014486 333.4MB/s urls (47.77 %) -0.9% - BM_ZFlat/2 23035 22237 5.2GB/s jpg (99.95 %) +3.6% - BM_ZFlat/3 539 540 354.5MB/s jpg_200 (73.00 %) -0.2% - BM_ZFlat/4 80709 81369 1.2GB/s pdf (81.85 %) -0.8% - BM_ZFlat/5 639059 639220 613.0MB/s html4 (22.51 %) -0.0% - BM_ZFlat/6 577203 583370 249.3MB/s txt1 (57.87 %) -1.1% - BM_ZFlat/7 510887 516094 232.0MB/s txt2 (61.93 %) -1.0% - BM_ZFlat/8 1535843 1556973 262.2MB/s txt3 (54.92 %) -1.4% - BM_ZFlat/9 2070068 2102380 219.3MB/s txt4 (66.22 %) -1.5% - BM_ZFlat/10 152396 152148 745.5MB/s pb (19.64 %) +0.2% - 
BM_ZFlat/11 447367 445859 395.4MB/s gaviota (37.72 %) +0.3% - BM_ZFlat/12 76375 76797 306.3MB/s cp (48.12 %) -0.5% - BM_ZFlat/13 31518 31987 333.3MB/s c (42.40 %) -1.5% - BM_ZFlat/14 10598 10827 328.6MB/s lsp (48.37 %) -2.1% - BM_ZFlat/15 1782243 1802728 546.5MB/s xls (41.23 %) -1.1% - BM_ZFlat/16 526 539 355.0MB/s xls_200 (78.00 %) -2.4% - BM_ZFlat/17 598141 597311 822.1MB/s bin (18.11 %) +0.1% - BM_ZFlat/18 121 120 1.6GB/s bin_200 (7.50 %) +0.8% - BM_ZFlat/19 109981 112173 326.0MB/s sum (48.96 %) -2.0% - BM_ZFlat/20 14355 14575 277.4MB/s man (59.36 %) -1.5% - Sum of all benchmarks 33882722 33879325 +0.0% - - Sandy Bridge (64-bit, opt): - - Benchmark Base (ns) New (ns) Improvement - -------------------------------------------------------------------------------------- - BM_UFlat/0 43764 41600 2.3GB/s html +5.2% - BM_UFlat/1 517990 507058 1.3GB/s urls +2.2% - BM_UFlat/2 6625 5529 20.8GB/s jpg +19.8% - BM_UFlat/3 154 155 1.2GB/s jpg_200 -0.6% - BM_UFlat/4 12795 11747 8.1GB/s pdf +8.9% - BM_UFlat/5 200335 193413 2.0GB/s html4 +3.6% - BM_UFlat/6 156574 156426 929.2MB/s txt1 +0.1% - BM_UFlat/7 137574 137464 870.4MB/s txt2 +0.1% - BM_UFlat/8 422551 421603 967.4MB/s txt3 +0.2% - BM_UFlat/9 577749 578985 795.6MB/s txt4 -0.2% - BM_UFlat/10 42329 39362 2.8GB/s pb +7.5% - BM_UFlat/11 170615 169751 1037.9MB/s gaviota +0.5% - BM_UFlat/12 12800 12719 1.8GB/s cp +0.6% - BM_UFlat/13 6585 6579 1.6GB/s c +0.1% - BM_UFlat/14 2066 2044 1.7GB/s lsp +1.1% - BM_UFlat/15 750861 746911 1.3GB/s xls +0.5% - BM_UFlat/16 188 192 996.0MB/s xls_200 -2.1% - BM_UFlat/17 271622 264333 1.8GB/s bin +2.8% - BM_UFlat/18 208 207 923.6MB/s bin_200 +0.5% - BM_UFlat/19 24667 24845 1.4GB/s sum -0.7% - BM_UFlat/20 2663 2662 1.5GB/s man +0.0% - BM_ZFlat/0 115173 115624 846.5MB/s html (22.31 %) -0.4% - BM_ZFlat/1 1530331 1537769 436.5MB/s urls (47.77 %) -0.5% - BM_ZFlat/2 17503 17013 6.8GB/s jpg (99.95 %) +2.9% - BM_ZFlat/3 385 385 496.3MB/s jpg_200 (73.00 %) +0.0% - BM_ZFlat/4 61753 61540 1.6GB/s pdf (81.85 
%) +0.3% - BM_ZFlat/5 484806 483356 810.1MB/s html4 (22.51 %) +0.3% - BM_ZFlat/6 464143 467609 310.9MB/s txt1 (57.87 %) -0.7% - BM_ZFlat/7 410315 413319 289.5MB/s txt2 (61.93 %) -0.7% - BM_ZFlat/8 1244082 1249381 326.5MB/s txt3 (54.92 %) -0.4% - BM_ZFlat/9 1696914 1709685 269.4MB/s txt4 (66.22 %) -0.7% - BM_ZFlat/10 104148 103372 1096.7MB/s pb (19.64 %) +0.8% - BM_ZFlat/11 363522 359722 489.8MB/s gaviota (37.72 %) +1.1% - BM_ZFlat/12 47021 50095 469.3MB/s cp (48.12 %) -6.1% - BM_ZFlat/13 16888 16985 627.4MB/s c (42.40 %) -0.6% - BM_ZFlat/14 5496 5469 650.3MB/s lsp (48.37 %) +0.5% - BM_ZFlat/15 1460713 1448760 679.5MB/s xls (41.23 %) +0.8% - BM_ZFlat/16 387 393 486.8MB/s xls_200 (78.00 %) -1.5% - BM_ZFlat/17 457654 451462 1086.6MB/s bin (18.11 %) +1.4% - BM_ZFlat/18 97 87 2.1GB/s bin_200 (7.50 %) +11.5% - BM_ZFlat/19 77904 80924 451.7MB/s sum (48.96 %) -3.7% - BM_ZFlat/20 7648 7663 527.1MB/s man (59.36 %) -0.2% - Sum of all benchmarks 25493635 25482069 +0.0% - - A=dehao - R=sesse - -commit 11ccdfb868387e56d845766d89ddab9d489c4128 -Author: Steinar H. Gunderson -Date: Mon Jun 22 16:07:58 2015 +0200 - - Sync with various Google-internal changes. - - Should not mean much for the open-source version. - -commit 22acaf438ed93ab21a2ff1919d173206798b996e -Author: Steinar H. Gunderson -Date: Mon Jun 22 15:39:08 2015 +0200 - - Change some internal path names. - - This is mostly to sync up with some changes from Google's internal - repositories; it does not affect the open-source distribution in itself. - -commit 1ff9be9b8fafc8528ca9e055646f5932aa5db9c4 -Author: snappy.mirrorbot@gmail.com -Date: Fri Feb 28 11:18:07 2014 +0000 - - Release Snappy 1.1.2. 
- - R=jeff - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@84 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit 19690d78e83f8963f497585031efa3d9ca66b807 -Author: snappy.mirrorbot@gmail.com -Date: Wed Feb 19 10:31:49 2014 +0000 - - Fix public issue 82: Stop distributing benchmark data files that have - unclear or unsuitable licensing. - - In general, we replace the files we can with liberally licensed data, - and remove all the others (in particular all the parts of the Canterbury - corpus that are not clearly in the public domain). The replacements - do not always have the exact same characteristics as the original ones, - but they are more than good enough to be useful for benchmarking. - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@83 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit f82bff66afe0de4c9ae22f8c4ef84e3c2233e799 -Author: snappy.mirrorbot@gmail.com -Date: Fri Oct 25 13:31:27 2013 +0000 - - Add support for padding in the Snappy framed format. - - This is specifically motivated by DICOM's demands that embedded data - must be of an even number of bytes, but could in principle be used for - any sort of padding/alignment needed. - - R=sanjay - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@82 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit eeead8dc38ea359f027fb6e89f345448e8e9d723 -Author: snappy.mirrorbot@gmail.com -Date: Tue Oct 15 15:21:31 2013 +0000 - - Release Snappy 1.1.1. - - R=jeff - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@81 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit 6bc39e24c76adbbff26ae629fafbf7dfc795f554 -Author: snappy.mirrorbot@gmail.com -Date: Tue Aug 13 12:55:00 2013 +0000 - - Add autoconf tests for size_t and ssize_t. Sort-of resolves public issue 79; - it would solve the problem if MSVC typically used autoconf. However, it gives - a natural place (config.h) to put the typedef even for MSVC. 
- - R=jsbell - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@80 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit 7c3c01df77e191ad1f8377448961fe88db2802e9 -Author: snappy.mirrorbot@gmail.com -Date: Mon Jul 29 11:06:44 2013 +0000 - - When we compare the number of bytes produced with the offset for a - backreference, make the signedness of the bytes produced clear, - by sticking it into a size_t. This avoids a signed/unsigned compare - warning from MSVC (public issue 71), and also is slightly clearer. - - Since the line is now so long the explanatory comment about the -1u - trick has to go somewhere else anyway, I used the opportunity to - explain it in slightly more detail. - - This is a purely stylistic change; the emitted assembler from GCC - is identical. - - R=jeff - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@79 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit 2f0aaf8631d8fb2475ca1a6687c181efb14ed286 -Author: snappy.mirrorbot@gmail.com -Date: Sun Jun 30 19:24:03 2013 +0000 - - In the fast path for decompressing literals, instead of checking - whether there's 16 bytes free and then checking right afterwards - (when having subtracted the literal size) that there are now - 5 bytes free, just check once for 21 bytes. This skips a compare - and a branch; although it is easily predictable, it is still - a few cycles on a fast path that we would like to get rid of. - - Benchmarking this yields very confusing results. On open-source - GCC 4.8.1 on Haswell, we get exactly the expected results; the - benchmarks where we hit the fast path for literals (in particular - the two HTML benchmarks and the protobuf benchmark) give very nice - speedups, and the others are not really affected. - - However, benchmarks with Google's GCC branch on other hardware - is much less clear. 
It seems that we have a weak loss in some cases - (and the win for the “typical” win cases are not nearly as clear), - but that it depends on microarchitecture and plain luck in how we run - the benchmark. Looking at the generated assembler, it seems that - the removal of the if causes other large-scale changes in how the - function is laid out, which makes it likely that this is just bad luck. - - Thus, we should keep this change, even though its exact current impact is - unclear; it's a sensible change per se, and dropping it on the basis of - microoptimization for a given compiler (or even branch of a compiler) - would seem like a bad strategy in the long run. - - Microbenchmark results (all in 64-bit, opt mode): - - Nehalem, Google GCC: - - Benchmark Base (ns) New (ns) Improvement - ------------------------------------------------------------------------------ - BM_UFlat/0 76747 75591 1.3GB/s html +1.5% - BM_UFlat/1 765756 757040 886.3MB/s urls +1.2% - BM_UFlat/2 10867 10893 10.9GB/s jpg -0.2% - BM_UFlat/3 124 131 1.4GB/s jpg_200 -5.3% - BM_UFlat/4 31663 31596 2.8GB/s pdf +0.2% - BM_UFlat/5 314162 308176 1.2GB/s html4 +1.9% - BM_UFlat/6 29668 29746 790.6MB/s cp -0.3% - BM_UFlat/7 12958 13386 796.4MB/s c -3.2% - BM_UFlat/8 3596 3682 966.0MB/s lsp -2.3% - BM_UFlat/9 1019193 1033493 953.3MB/s xls -1.4% - BM_UFlat/10 239 247 775.3MB/s xls_200 -3.2% - BM_UFlat/11 236411 240271 606.9MB/s txt1 -1.6% - BM_UFlat/12 206639 209768 571.2MB/s txt2 -1.5% - BM_UFlat/13 627803 635722 641.4MB/s txt3 -1.2% - BM_UFlat/14 845932 857816 538.2MB/s txt4 -1.4% - BM_UFlat/15 402107 391670 1.2GB/s bin +2.7% - BM_UFlat/16 283 279 683.6MB/s bin_200 +1.4% - BM_UFlat/17 46070 46815 781.5MB/s sum -1.6% - BM_UFlat/18 5053 5163 782.0MB/s man -2.1% - BM_UFlat/19 79721 76581 1.4GB/s pb +4.1% - BM_UFlat/20 251158 252330 697.5MB/s gaviota -0.5% - Sum of all benchmarks 4966150 4980396 -0.3% - - - Sandy Bridge, Google GCC: - - Benchmark Base (ns) New (ns) Improvement - 
------------------------------------------------------------------------------ - BM_UFlat/0 42850 42182 2.3GB/s html +1.6% - BM_UFlat/1 525660 515816 1.3GB/s urls +1.9% - BM_UFlat/2 7173 7283 16.3GB/s jpg -1.5% - BM_UFlat/3 92 91 2.1GB/s jpg_200 +1.1% - BM_UFlat/4 15147 14872 5.9GB/s pdf +1.8% - BM_UFlat/5 199936 192116 2.0GB/s html4 +4.1% - BM_UFlat/6 12796 12443 1.8GB/s cp +2.8% - BM_UFlat/7 6588 6400 1.6GB/s c +2.9% - BM_UFlat/8 2010 1951 1.8GB/s lsp +3.0% - BM_UFlat/9 761124 763049 1.3GB/s xls -0.3% - BM_UFlat/10 186 189 1016.1MB/s xls_200 -1.6% - BM_UFlat/11 159354 158460 918.6MB/s txt1 +0.6% - BM_UFlat/12 139732 139950 856.1MB/s txt2 -0.2% - BM_UFlat/13 429917 425027 961.7MB/s txt3 +1.2% - BM_UFlat/14 585255 587324 785.8MB/s txt4 -0.4% - BM_UFlat/15 276186 266173 1.8GB/s bin +3.8% - BM_UFlat/16 205 207 925.5MB/s bin_200 -1.0% - BM_UFlat/17 24925 24935 1.4GB/s sum -0.0% - BM_UFlat/18 2632 2576 1.5GB/s man +2.2% - BM_UFlat/19 40546 39108 2.8GB/s pb +3.7% - BM_UFlat/20 175803 168209 1048.9MB/s gaviota +4.5% - Sum of all benchmarks 3408117 3368361 +1.2% - - - Haswell, upstream GCC 4.8.1: - - Benchmark Base (ns) New (ns) Improvement - ------------------------------------------------------------------------------ - BM_UFlat/0 46308 40641 2.3GB/s html +13.9% - BM_UFlat/1 513385 514706 1.3GB/s urls -0.3% - BM_UFlat/2 6197 6151 19.2GB/s jpg +0.7% - BM_UFlat/3 61 61 3.0GB/s jpg_200 +0.0% - BM_UFlat/4 13551 13429 6.5GB/s pdf +0.9% - BM_UFlat/5 198317 190243 2.0GB/s html4 +4.2% - BM_UFlat/6 14768 12560 1.8GB/s cp +17.6% - BM_UFlat/7 6453 6447 1.6GB/s c +0.1% - BM_UFlat/8 1991 1980 1.8GB/s lsp +0.6% - BM_UFlat/9 766947 770424 1.2GB/s xls -0.5% - BM_UFlat/10 170 169 1.1GB/s xls_200 +0.6% - BM_UFlat/11 164350 163554 888.7MB/s txt1 +0.5% - BM_UFlat/12 145444 143830 832.1MB/s txt2 +1.1% - BM_UFlat/13 437849 438413 929.2MB/s txt3 -0.1% - BM_UFlat/14 603587 605309 759.8MB/s txt4 -0.3% - BM_UFlat/15 249799 248067 1.9GB/s bin +0.7% - BM_UFlat/16 191 188 1011.4MB/s bin_200 +1.6% - 
BM_UFlat/17 26064 24778 1.4GB/s sum +5.2% - BM_UFlat/18 2620 2601 1.5GB/s man +0.7% - BM_UFlat/19 44551 37373 3.0GB/s pb +19.2% - BM_UFlat/20 165408 164584 1.0GB/s gaviota +0.5% - Sum of all benchmarks 3408011 3385508 +0.7% - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@78 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit 062bf544a61107db730b6d08cb0b159c4dd9b24c -Author: snappy.mirrorbot@gmail.com -Date: Fri Jun 14 21:42:26 2013 +0000 - - Make the two IncrementalCopy* functions take in an ssize_t instead of a len, - in order to avoid having to do 32-to-64-bit signed conversions on a hot path - during decompression. (Also fixes some MSVC warnings, mentioned in public - issue 75, but more of those remain.) They cannot be size_t because we expect - them to go negative and test for that. - - This saves a few movzwl instructions, yielding ~2% speedup in decompression. - - - Sandy Bridge: - - Benchmark Base (ns) New (ns) Improvement - ------------------------------------------------------------------------------------------------- - BM_UFlat/0 48009 41283 2.3GB/s html +16.3% - BM_UFlat/1 531274 513419 1.3GB/s urls +3.5% - BM_UFlat/2 7378 7062 16.8GB/s jpg +4.5% - BM_UFlat/3 92 92 2.0GB/s jpg_200 +0.0% - BM_UFlat/4 15057 14974 5.9GB/s pdf +0.6% - BM_UFlat/5 204323 193140 2.0GB/s html4 +5.8% - BM_UFlat/6 13282 12611 1.8GB/s cp +5.3% - BM_UFlat/7 6511 6504 1.6GB/s c +0.1% - BM_UFlat/8 2014 2030 1.7GB/s lsp -0.8% - BM_UFlat/9 775909 768336 1.3GB/s xls +1.0% - BM_UFlat/10 182 184 1043.2MB/s xls_200 -1.1% - BM_UFlat/11 167352 161630 901.2MB/s txt1 +3.5% - BM_UFlat/12 147393 142246 842.8MB/s txt2 +3.6% - BM_UFlat/13 449960 432853 944.4MB/s txt3 +4.0% - BM_UFlat/14 620497 594845 775.9MB/s txt4 +4.3% - BM_UFlat/15 265610 267356 1.8GB/s bin -0.7% - BM_UFlat/16 206 205 932.7MB/s bin_200 +0.5% - BM_UFlat/17 25561 24730 1.4GB/s sum +3.4% - BM_UFlat/18 2620 2644 1.5GB/s man -0.9% - BM_UFlat/19 45766 38589 2.9GB/s pb +18.6% - BM_UFlat/20 171107 169832 1039.5MB/s gaviota +0.8% - 
Sum of all benchmarks 3500103 3394565 +3.1% - - - Westmere: - - Benchmark Base (ns) New (ns) Improvement - ------------------------------------------------------------------------------------------------- - BM_UFlat/0 72624 71526 1.3GB/s html +1.5% - BM_UFlat/1 735821 722917 930.8MB/s urls +1.8% - BM_UFlat/2 10450 10172 11.7GB/s jpg +2.7% - BM_UFlat/3 117 117 1.6GB/s jpg_200 +0.0% - BM_UFlat/4 29817 29648 3.0GB/s pdf +0.6% - BM_UFlat/5 297126 293073 1.3GB/s html4 +1.4% - BM_UFlat/6 28252 27994 842.0MB/s cp +0.9% - BM_UFlat/7 12672 12391 862.1MB/s c +2.3% - BM_UFlat/8 3507 3425 1040.9MB/s lsp +2.4% - BM_UFlat/9 1004268 969395 1018.0MB/s xls +3.6% - BM_UFlat/10 233 227 844.8MB/s xls_200 +2.6% - BM_UFlat/11 230054 224981 647.8MB/s txt1 +2.3% - BM_UFlat/12 201229 196447 610.5MB/s txt2 +2.4% - BM_UFlat/13 609547 596761 685.3MB/s txt3 +2.1% - BM_UFlat/14 824362 804821 573.8MB/s txt4 +2.4% - BM_UFlat/15 371095 374899 1.3GB/s bin -1.0% - BM_UFlat/16 267 267 717.8MB/s bin_200 +0.0% - BM_UFlat/17 44623 43828 835.9MB/s sum +1.8% - BM_UFlat/18 5077 4815 841.0MB/s man +5.4% - BM_UFlat/19 74964 73210 1.5GB/s pb +2.4% - BM_UFlat/20 237987 236745 746.0MB/s gaviota +0.5% - Sum of all benchmarks 4794092 4697659 +2.1% - - - Istanbul: - - Benchmark Base (ns) New (ns) Improvement - ------------------------------------------------------------------------------------------------- - BM_UFlat/0 98614 96376 1020.4MB/s html +2.3% - BM_UFlat/1 963740 953241 707.2MB/s urls +1.1% - BM_UFlat/2 25042 24769 4.8GB/s jpg +1.1% - BM_UFlat/3 180 180 1065.6MB/s jpg_200 +0.0% - BM_UFlat/4 45942 45403 1.9GB/s pdf +1.2% - BM_UFlat/5 400135 390226 1008.2MB/s html4 +2.5% - BM_UFlat/6 37768 37392 631.9MB/s cp +1.0% - BM_UFlat/7 18585 18200 588.2MB/s c +2.1% - BM_UFlat/8 5751 5690 627.7MB/s lsp +1.1% - BM_UFlat/9 1543154 1542209 641.4MB/s xls +0.1% - BM_UFlat/10 381 388 494.6MB/s xls_200 -1.8% - BM_UFlat/11 339715 331973 440.1MB/s txt1 +2.3% - BM_UFlat/12 294807 289418 415.4MB/s txt2 +1.9% - BM_UFlat/13 
906160 884094 463.3MB/s txt3 +2.5% - BM_UFlat/14 1224221 1198435 386.1MB/s txt4 +2.2% - BM_UFlat/15 516277 502923 979.5MB/s bin +2.7% - BM_UFlat/16 405 402 477.2MB/s bin_200 +0.7% - BM_UFlat/17 61640 60621 605.6MB/s sum +1.7% - BM_UFlat/18 7326 7383 549.5MB/s man -0.8% - BM_UFlat/19 94720 92653 1.2GB/s pb +2.2% - BM_UFlat/20 360435 346687 510.6MB/s gaviota +4.0% - Sum of all benchmarks 6944998 6828663 +1.7% - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@77 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit 328aafa1980824a9afdcd50edc30d9d5157e417f -Author: snappy.mirrorbot@gmail.com -Date: Thu Jun 13 16:19:52 2013 +0000 - - Add support for uncompressing to iovecs (scatter I/O). - Windows does not have struct iovec defined anywhere, - so we define our own version that's equal to what UNIX - typically has. - - The bulk of this patch was contributed by Mohit Aron. - - R=jeff - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@76 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit cd92eb0852e2339187b693eef3595a07d2276c1d -Author: snappy.mirrorbot@gmail.com -Date: Wed Jun 12 19:51:15 2013 +0000 - - Some code reorganization needed for an internal change. - - R=fikes - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@75 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit a3e928d62bbd61b523b988c07b560253950cf73b -Author: snappy.mirrorbot@gmail.com -Date: Tue Apr 9 15:33:30 2013 +0000 - - Supports truncated test data in zippy benchmark. - - R=sesse - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@74 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit bde324c0169763688f35ee44630a26ad1f49eec3 -Author: snappy.mirrorbot@gmail.com -Date: Tue Feb 5 14:36:15 2013 +0000 - - Release Snappy 1.1.0. 
- - R=sanjay - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@73 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit 8168446c7eaaa0594e1f4ca923376dcf3a2846fa -Author: snappy.mirrorbot@gmail.com -Date: Tue Feb 5 14:30:05 2013 +0000 - - Make ./snappy_unittest pass without "srcdir" being defined. - - Previously, snappy_unittests would read from an absolute path /testdata/..; - convert it to use a relative path instead. - - Patch from Marc-Antonie Ruel. - - R=maruel - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@72 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit 27a0cc394950ebdad2e8d67322f0862835b10bd9 -Author: snappy.mirrorbot@gmail.com -Date: Fri Jan 18 12:16:36 2013 +0000 - - Increase the Zippy block size from 32 kB to 64 kB, winning ~3% density - while being effectively performance neutral. - - The longer story about density is that we win 3-6% density on the benchmarks - where this has any effect at all; many of the benchmarks (cp, c, lsp, man) - are smaller than 32 kB and thus will have no effect. Binary data also seems - to win little or nothing; of course, the already-compressed data wins nothing. - The protobuf benchmark wins as much as ~18% depending on architecture, - but I wouldn't be too sure that this is representative of protobuf data in - general. - - As of performance, we lose a tiny amount since we get more tags (e.g., a long - literal might be broken up into literal-copy-literal), but we win it back with - less clearing of the hash table, and more opportunities to skip incompressible - data (e.g. in the jpg benchmark). Decompression seems to get ever so slightly - slower, again due to more tags. The total net change is about as close to zero - as we can get, so the end effect seems to be simply more density and no - real performance change. - - The comment about not changing kBlockSize, scary as it is, is not really - relevant, since we're never going to have a block-level decompressor without - explicitly marked blocks. 
Replace it with something more appropriate. - - This affects the framing format, but it's okay to change it since it basically - has no users yet. - - - Density (note that cp, c, lsp and man are all smaller than 32 kB): - - Benchmark Description Base (%) New (%) Improvement - -------------------------------------------------------------- - ZFlat/0 html 22.57 22.31 +5.6% - ZFlat/1 urls 50.89 47.77 +6.5% - ZFlat/2 jpg 99.88 99.87 +0.0% - ZFlat/3 pdf 82.13 82.07 +0.1% - ZFlat/4 html4 23.55 22.51 +4.6% - ZFlat/5 cp 48.12 48.12 +0.0% - ZFlat/6 c 42.40 42.40 +0.0% - ZFlat/7 lsp 48.37 48.37 +0.0% - ZFlat/8 xls 41.34 41.23 +0.3% - ZFlat/9 txt1 59.81 57.87 +3.4% - ZFlat/10 txt2 64.07 61.93 +3.5% - ZFlat/11 txt3 57.11 54.92 +4.0% - ZFlat/12 txt4 68.35 66.22 +3.2% - ZFlat/13 bin 18.21 18.11 +0.6% - ZFlat/14 sum 51.88 48.96 +6.0% - ZFlat/15 man 59.36 59.36 +0.0% - ZFlat/16 pb 23.15 19.64 +17.9% - ZFlat/17 gaviota 38.27 37.72 +1.5% - Geometric mean 45.51 44.15 +3.1% - - - Microbenchmarks (64-bit, opt): - - Westmere 2.8 GHz: - - Benchmark Base (ns) New (ns) Improvement - ------------------------------------------------------------------------------------------------- - BM_UFlat/0 75342 75027 1.3GB/s html +0.4% - BM_UFlat/1 723767 744269 899.6MB/s urls -2.8% - BM_UFlat/2 10072 10072 11.7GB/s jpg +0.0% - BM_UFlat/3 30747 30388 2.9GB/s pdf +1.2% - BM_UFlat/4 307353 306063 1.2GB/s html4 +0.4% - BM_UFlat/5 28593 28743 816.3MB/s cp -0.5% - BM_UFlat/6 12958 12998 818.1MB/s c -0.3% - BM_UFlat/7 3700 3792 935.8MB/s lsp -2.4% - BM_UFlat/8 999685 999905 982.1MB/s xls -0.0% - BM_UFlat/9 232954 230079 630.4MB/s txt1 +1.2% - BM_UFlat/10 200785 201468 592.6MB/s txt2 -0.3% - BM_UFlat/11 617267 610968 666.1MB/s txt3 +1.0% - BM_UFlat/12 821595 822475 558.7MB/s txt4 -0.1% - BM_UFlat/13 377097 377632 1.3GB/s bin -0.1% - BM_UFlat/14 45476 45260 805.8MB/s sum +0.5% - BM_UFlat/15 4985 5003 805.7MB/s man -0.4% - BM_UFlat/16 80813 77494 1.4GB/s pb +4.3% - BM_UFlat/17 251792 241553 727.7MB/s gaviota 
+4.2% - BM_UValidate/0 40343 40354 2.4GB/s html -0.0% - BM_UValidate/1 426890 451574 1.4GB/s urls -5.5% - BM_UValidate/2 187 179 661.9GB/s jpg +4.5% - BM_UValidate/3 13783 13827 6.4GB/s pdf -0.3% - BM_UValidate/4 162393 163335 2.3GB/s html4 -0.6% - BM_UDataBuffer/0 93756 93302 1046.7MB/s html +0.5% - BM_UDataBuffer/1 886714 916292 730.7MB/s urls -3.2% - BM_UDataBuffer/2 15861 16401 7.2GB/s jpg -3.3% - BM_UDataBuffer/3 38934 39224 2.2GB/s pdf -0.7% - BM_UDataBuffer/4 381008 379428 1029.5MB/s html4 +0.4% - BM_UCord/0 92528 91098 1072.0MB/s html +1.6% - BM_UCord/1 858421 885287 756.3MB/s urls -3.0% - BM_UCord/2 13140 13464 8.8GB/s jpg -2.4% - BM_UCord/3 39012 37773 2.3GB/s pdf +3.3% - BM_UCord/4 376869 371267 1052.1MB/s html4 +1.5% - BM_UCordString/0 75810 75303 1.3GB/s html +0.7% - BM_UCordString/1 735290 753841 888.2MB/s urls -2.5% - BM_UCordString/2 11945 13113 9.0GB/s jpg -8.9% - BM_UCordString/3 33901 32562 2.7GB/s pdf +4.1% - BM_UCordString/4 310985 309390 1.2GB/s html4 +0.5% - BM_UCordValidate/0 40952 40450 2.4GB/s html +1.2% - BM_UCordValidate/1 433842 456531 1.4GB/s urls -5.0% - BM_UCordValidate/2 1179 1173 100.8GB/s jpg +0.5% - BM_UCordValidate/3 14481 14392 6.1GB/s pdf +0.6% - BM_UCordValidate/4 164364 164151 2.3GB/s html4 +0.1% - BM_ZFlat/0 160610 156601 623.6MB/s html (22.31 %) +2.6% - BM_ZFlat/1 1995238 1993582 335.9MB/s urls (47.77 %) +0.1% - BM_ZFlat/2 30133 24983 4.7GB/s jpg (99.87 %) +20.6% - BM_ZFlat/3 74453 73128 1.2GB/s pdf (82.07 %) +1.8% - BM_ZFlat/4 647674 633729 616.4MB/s html4 (22.51 %) +2.2% - BM_ZFlat/5 76259 76090 308.4MB/s cp (48.12 %) +0.2% - BM_ZFlat/6 31106 31084 342.1MB/s c (42.40 %) +0.1% - BM_ZFlat/7 10507 10443 339.8MB/s lsp (48.37 %) +0.6% - BM_ZFlat/8 1811047 1793325 547.6MB/s xls (41.23 %) +1.0% - BM_ZFlat/9 597903 581793 249.3MB/s txt1 (57.87 %) +2.8% - BM_ZFlat/10 525320 514522 232.0MB/s txt2 (61.93 %) +2.1% - BM_ZFlat/11 1596591 1551636 262.3MB/s txt3 (54.92 %) +2.9% - BM_ZFlat/12 2134523 2094033 219.5MB/s txt4 (66.22 %) 
+1.9% - BM_ZFlat/13 593024 587869 832.6MB/s bin (18.11 %) +0.9% - BM_ZFlat/14 114746 110666 329.5MB/s sum (48.96 %) +3.7% - BM_ZFlat/15 14376 14485 278.3MB/s man (59.36 %) -0.8% - BM_ZFlat/16 167908 150070 753.6MB/s pb (19.64 %) +11.9% - BM_ZFlat/17 460228 442253 397.5MB/s gaviota (37.72 %) +4.1% - BM_ZCord/0 164896 160241 609.4MB/s html +2.9% - BM_ZCord/1 2070239 2043492 327.7MB/s urls +1.3% - BM_ZCord/2 54402 47002 2.5GB/s jpg +15.7% - BM_ZCord/3 85871 83832 1073.1MB/s pdf +2.4% - BM_ZCord/4 664078 648825 602.0MB/s html4 +2.4% - BM_ZDataBuffer/0 174874 172549 566.0MB/s html +1.3% - BM_ZDataBuffer/1 2134410 2139173 313.0MB/s urls -0.2% - BM_ZDataBuffer/2 71911 69551 1.7GB/s jpg +3.4% - BM_ZDataBuffer/3 98236 99727 902.1MB/s pdf -1.5% - BM_ZDataBuffer/4 710776 699104 558.8MB/s html4 +1.7% - Sum of all benchmarks 27358908 27200688 +0.6% - - - Sandy Bridge 2.6 GHz: - - Benchmark Base (ns) New (ns) Improvement - ------------------------------------------------------------------------------------------------- - BM_UFlat/0 49356 49018 1.9GB/s html +0.7% - BM_UFlat/1 516764 531955 1.2GB/s urls -2.9% - BM_UFlat/2 6982 7304 16.2GB/s jpg -4.4% - BM_UFlat/3 15285 15598 5.6GB/s pdf -2.0% - BM_UFlat/4 206557 206669 1.8GB/s html4 -0.1% - BM_UFlat/5 13681 13567 1.7GB/s cp +0.8% - BM_UFlat/6 6571 6592 1.6GB/s c -0.3% - BM_UFlat/7 2008 1994 1.7GB/s lsp +0.7% - BM_UFlat/8 775700 773286 1.2GB/s xls +0.3% - BM_UFlat/9 165578 164480 881.8MB/s txt1 +0.7% - BM_UFlat/10 143707 144139 828.2MB/s txt2 -0.3% - BM_UFlat/11 443026 436281 932.8MB/s txt3 +1.5% - BM_UFlat/12 603129 595856 771.2MB/s txt4 +1.2% - BM_UFlat/13 271682 270450 1.8GB/s bin +0.5% - BM_UFlat/14 26200 25666 1.4GB/s sum +2.1% - BM_UFlat/15 2620 2608 1.5GB/s man +0.5% - BM_UFlat/16 48908 47756 2.3GB/s pb +2.4% - BM_UFlat/17 174638 170346 1031.9MB/s gaviota +2.5% - BM_UValidate/0 31922 31898 3.0GB/s html +0.1% - BM_UValidate/1 341265 363554 1.8GB/s urls -6.1% - BM_UValidate/2 160 151 782.8GB/s jpg +6.0% - BM_UValidate/3 10402 
10380 8.5GB/s pdf +0.2% - BM_UValidate/4 129490 130587 2.9GB/s html4 -0.8% - BM_UDataBuffer/0 59383 58736 1.6GB/s html +1.1% - BM_UDataBuffer/1 619222 637786 1049.8MB/s urls -2.9% - BM_UDataBuffer/2 10775 11941 9.9GB/s jpg -9.8% - BM_UDataBuffer/3 18002 17930 4.9GB/s pdf +0.4% - BM_UDataBuffer/4 259182 259306 1.5GB/s html4 -0.0% - BM_UCord/0 59379 57814 1.6GB/s html +2.7% - BM_UCord/1 598456 615162 1088.4MB/s urls -2.7% - BM_UCord/2 8519 8628 13.7GB/s jpg -1.3% - BM_UCord/3 18123 17537 5.0GB/s pdf +3.3% - BM_UCord/4 252375 252331 1.5GB/s html4 +0.0% - BM_UCordString/0 49494 49790 1.9GB/s html -0.6% - BM_UCordString/1 524659 541803 1.2GB/s urls -3.2% - BM_UCordString/2 8206 8354 14.2GB/s jpg -1.8% - BM_UCordString/3 17235 16537 5.3GB/s pdf +4.2% - BM_UCordString/4 210188 211072 1.8GB/s html4 -0.4% - BM_UCordValidate/0 31956 31587 3.0GB/s html +1.2% - BM_UCordValidate/1 340828 362141 1.8GB/s urls -5.9% - BM_UCordValidate/2 783 744 158.9GB/s jpg +5.2% - BM_UCordValidate/3 10543 10462 8.4GB/s pdf +0.8% - BM_UCordValidate/4 130150 129789 2.9GB/s html4 +0.3% - BM_ZFlat/0 113873 111200 878.2MB/s html (22.31 %) +2.4% - BM_ZFlat/1 1473023 1489858 449.4MB/s urls (47.77 %) -1.1% - BM_ZFlat/2 23569 19486 6.1GB/s jpg (99.87 %) +21.0% - BM_ZFlat/3 49178 48046 1.8GB/s pdf (82.07 %) +2.4% - BM_ZFlat/4 475063 469394 832.2MB/s html4 (22.51 %) +1.2% - BM_ZFlat/5 46910 46816 501.2MB/s cp (48.12 %) +0.2% - BM_ZFlat/6 16883 16916 628.6MB/s c (42.40 %) -0.2% - BM_ZFlat/7 5381 5447 651.5MB/s lsp (48.37 %) -1.2% - BM_ZFlat/8 1466870 1473861 666.3MB/s xls (41.23 %) -0.5% - BM_ZFlat/9 468006 464101 312.5MB/s txt1 (57.87 %) +0.8% - BM_ZFlat/10 408157 408957 291.9MB/s txt2 (61.93 %) -0.2% - BM_ZFlat/11 1253348 1232910 330.1MB/s txt3 (54.92 %) +1.7% - BM_ZFlat/12 1702373 1702977 269.8MB/s txt4 (66.22 %) -0.0% - BM_ZFlat/13 439792 438557 1116.0MB/s bin (18.11 %) +0.3% - BM_ZFlat/14 80766 78851 462.5MB/s sum (48.96 %) +2.4% - BM_ZFlat/15 7420 7542 534.5MB/s man (59.36 %) -1.6% - BM_ZFlat/16 
112043 100126 1.1GB/s pb (19.64 %) +11.9% - BM_ZFlat/17 368877 357703 491.4MB/s gaviota (37.72 %) +3.1% - BM_ZCord/0 116402 113564 859.9MB/s html +2.5% - BM_ZCord/1 1507156 1519911 440.5MB/s urls -0.8% - BM_ZCord/2 39860 33686 3.5GB/s jpg +18.3% - BM_ZCord/3 56211 54694 1.6GB/s pdf +2.8% - BM_ZCord/4 485594 479212 815.1MB/s html4 +1.3% - BM_ZDataBuffer/0 123185 121572 803.3MB/s html +1.3% - BM_ZDataBuffer/1 1569111 1589380 421.3MB/s urls -1.3% - BM_ZDataBuffer/2 53143 49556 2.4GB/s jpg +7.2% - BM_ZDataBuffer/3 65725 66826 1.3GB/s pdf -1.6% - BM_ZDataBuffer/4 517871 514750 758.9MB/s html4 +0.6% - Sum of all benchmarks 20258879 20315484 -0.3% - - - AMD Instanbul 2.4 GHz: - - Benchmark Base (ns) New (ns) Improvement - ------------------------------------------------------------------------------------------------- - BM_UFlat/0 97120 96585 1011.1MB/s html +0.6% - BM_UFlat/1 917473 948016 706.3MB/s urls -3.2% - BM_UFlat/2 21496 23938 4.9GB/s jpg -10.2% - BM_UFlat/3 44751 45639 1.9GB/s pdf -1.9% - BM_UFlat/4 391950 391413 998.0MB/s html4 +0.1% - BM_UFlat/5 37366 37201 630.7MB/s cp +0.4% - BM_UFlat/6 18350 18318 580.5MB/s c +0.2% - BM_UFlat/7 5672 5661 626.9MB/s lsp +0.2% - BM_UFlat/8 1533390 1529441 642.1MB/s xls +0.3% - BM_UFlat/9 335477 336553 431.0MB/s txt1 -0.3% - BM_UFlat/10 285140 292080 408.7MB/s txt2 -2.4% - BM_UFlat/11 888507 894758 454.9MB/s txt3 -0.7% - BM_UFlat/12 1187643 1210928 379.5MB/s txt4 -1.9% - BM_UFlat/13 493717 507447 964.5MB/s bin -2.7% - BM_UFlat/14 61740 60870 599.1MB/s sum +1.4% - BM_UFlat/15 7211 7187 560.9MB/s man +0.3% - BM_UFlat/16 97435 93100 1.2GB/s pb +4.7% - BM_UFlat/17 362662 356395 493.2MB/s gaviota +1.8% - BM_UValidate/0 47475 47118 2.0GB/s html +0.8% - BM_UValidate/1 501304 529741 1.2GB/s urls -5.4% - BM_UValidate/2 276 243 486.2GB/s jpg +13.6% - BM_UValidate/3 16361 16261 5.4GB/s pdf +0.6% - BM_UValidate/4 190741 190353 2.0GB/s html4 +0.2% - BM_UDataBuffer/0 111080 109771 889.6MB/s html +1.2% - BM_UDataBuffer/1 1051035 1085999 
616.5MB/s urls -3.2% - BM_UDataBuffer/2 25801 25463 4.6GB/s jpg +1.3% - BM_UDataBuffer/3 50493 49946 1.8GB/s pdf +1.1% - BM_UDataBuffer/4 447258 444138 879.5MB/s html4 +0.7% - BM_UCord/0 109350 107909 905.0MB/s html +1.3% - BM_UCord/1 1023396 1054964 634.7MB/s urls -3.0% - BM_UCord/2 25292 24371 4.9GB/s jpg +3.8% - BM_UCord/3 48955 49736 1.8GB/s pdf -1.6% - BM_UCord/4 440452 437331 893.2MB/s html4 +0.7% - BM_UCordString/0 98511 98031 996.2MB/s html +0.5% - BM_UCordString/1 933230 963495 694.9MB/s urls -3.1% - BM_UCordString/2 23311 24076 4.9GB/s jpg -3.2% - BM_UCordString/3 45568 46196 1.9GB/s pdf -1.4% - BM_UCordString/4 397791 396934 984.1MB/s html4 +0.2% - BM_UCordValidate/0 47537 46921 2.0GB/s html +1.3% - BM_UCordValidate/1 505071 532716 1.2GB/s urls -5.2% - BM_UCordValidate/2 1663 1621 72.9GB/s jpg +2.6% - BM_UCordValidate/3 16890 16926 5.2GB/s pdf -0.2% - BM_UCordValidate/4 192365 191984 2.0GB/s html4 +0.2% - BM_ZFlat/0 184708 179103 545.3MB/s html (22.31 %) +3.1% - BM_ZFlat/1 2293864 2302950 290.7MB/s urls (47.77 %) -0.4% - BM_ZFlat/2 52852 47618 2.5GB/s jpg (99.87 %) +11.0% - BM_ZFlat/3 100766 96179 935.3MB/s pdf (82.07 %) +4.8% - BM_ZFlat/4 741220 727977 536.6MB/s html4 (22.51 %) +1.8% - BM_ZFlat/5 85402 85418 274.7MB/s cp (48.12 %) -0.0% - BM_ZFlat/6 36558 36494 291.4MB/s c (42.40 %) +0.2% - BM_ZFlat/7 12706 12507 283.7MB/s lsp (48.37 %) +1.6% - BM_ZFlat/8 2336823 2335688 420.5MB/s xls (41.23 %) +0.0% - BM_ZFlat/9 701804 681153 212.9MB/s txt1 (57.87 %) +3.0% - BM_ZFlat/10 606700 597194 199.9MB/s txt2 (61.93 %) +1.6% - BM_ZFlat/11 1852283 1803238 225.7MB/s txt3 (54.92 %) +2.7% - BM_ZFlat/12 2475527 2443354 188.1MB/s txt4 (66.22 %) +1.3% - BM_ZFlat/13 694497 696654 702.6MB/s bin (18.11 %) -0.3% - BM_ZFlat/14 136929 129855 280.8MB/s sum (48.96 %) +5.4% - BM_ZFlat/15 17172 17124 235.4MB/s man (59.36 %) +0.3% - BM_ZFlat/16 190364 171763 658.4MB/s pb (19.64 %) +10.8% - BM_ZFlat/17 567285 555190 316.6MB/s gaviota (37.72 %) +2.2% - BM_ZCord/0 193490 187031 
522.1MB/s html +3.5% - BM_ZCord/1 2427537 2415315 277.2MB/s urls +0.5% - BM_ZCord/2 85378 81412 1.5GB/s jpg +4.9% - BM_ZCord/3 121898 119419 753.3MB/s pdf +2.1% - BM_ZCord/4 779564 762961 512.0MB/s html4 +2.2% - BM_ZDataBuffer/0 213820 207272 471.1MB/s html +3.2% - BM_ZDataBuffer/1 2589010 2586495 258.9MB/s urls +0.1% - BM_ZDataBuffer/2 121871 118885 1018.4MB/s jpg +2.5% - BM_ZDataBuffer/3 145382 145986 616.2MB/s pdf -0.4% - BM_ZDataBuffer/4 868117 852754 458.1MB/s html4 +1.8% - Sum of all benchmarks 33771833 33744763 +0.1% - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@71 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit 81f34784b7b812dcda956ee489dfdc74ec2da990 -Author: snappy.mirrorbot@gmail.com -Date: Sun Jan 6 19:21:26 2013 +0000 - - Adjust the Snappy open-source distribution for the changes in Google's - internal file API. - - R=sanjay - - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@70 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit 698af469b47fe809905e2ed173ad84241de5800f -Author: snappy.mirrorbot@gmail.com -Date: Fri Jan 4 11:54:20 2013 +0000 - - Change a few ORs to additions where they don't matter. This helps the compiler - use the LEA instruction more efficiently, since e.g. a + (b << 2) can be encoded - as one instruction. Even more importantly, it can constant-fold the - COPY_* enums together with the shifted negative constants, which also saves - some instructions. (We don't need it for LITERAL, since it happens to be 0.) - - I am unsure why the compiler couldn't do this itself, but the theory is that - it cannot prove that len-1 and len-4 cannot underflow/wrap, and thus can't - do the optimization safely. - - The gains are small but measurable; 0.5-1.0% over the BM_Z* benchmarks - (measured on Westmere, Sandy Bridge and Istanbul). 
- - R=sanjay - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@69 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit 55209f9b92efd97e0a61be28ed94210de04c3bfc -Author: snappy.mirrorbot@gmail.com -Date: Mon Oct 8 11:37:16 2012 +0000 - - Stop giving -Werror to automake, due to an incompatibility between current - versions of libtool and automake on non-GNU platforms (e.g. Mac OS X). - - R=sanjay - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@68 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit b86e81c8b3426a62d8ab3a7674c2506e9e678740 -Author: snappy.mirrorbot@gmail.com -Date: Fri Aug 17 13:54:47 2012 +0000 - - Fix public issue 66: Document GetUncompressedLength better, in particular that - it leaves the source in a state that's not appropriate for RawUncompress. - - R=sanjay - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@67 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit 2e225ba821b420ae28e1d427075d5589c1e892d9 -Author: snappy.mirrorbot@gmail.com -Date: Tue Jul 31 11:44:44 2012 +0000 - - Fix public issue 64: Check for at configure time, - since MSVC seemingly does not have it. - - R=sanjay - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@66 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit e89f20ab46ee11050760c6d57f05c2a3825a911c -Author: snappy.mirrorbot@gmail.com -Date: Wed Jul 4 09:34:48 2012 +0000 - - Handle the case where gettimeofday() goes backwards or returns the same value - twice; it could cause division by zero in the unit test framework. - (We already had one fix for this in place, but it was incomplete.) - - This could in theory happen on any system, since there are few guarantees - about gettimeofday(), but seems to only happen in practice on GNU/Hurd, where - gettimeofday() is cached and only updated ever so often. 
- - R=sanjay - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@65 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit 3ec60ac9878de5d0317ad38fc545080a4bfaa74f -Author: snappy.mirrorbot@gmail.com -Date: Wed Jul 4 09:28:33 2012 +0000 - - Mark ARMv4 as not supporting unaligned accesses (not just ARMv5 and ARMv6); - apparently Debian still targets these by default, giving us segfaults on - armel. - - R=sanjay - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@64 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit be80d6f74f9d82220e952a54f3f129aae1f13f95 -Author: snappy.mirrorbot@gmail.com -Date: Tue May 22 09:46:05 2012 +0000 - - Fix public bug #62: Remove an extraneous comma at the end of an enum list, - causing compile errors when embedded in Mozilla on OpenBSD. - - R=sanjay - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@63 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit 8b95464146dddab1c7068f879162db9a885cdafe -Author: snappy.mirrorbot@gmail.com -Date: Tue May 22 09:32:50 2012 +0000 - - Snappy library no longer depends on iostream. - - Achieved by moving logging macro definitions to a test-only - header file, and by changing non-test code to use assert, - fprintf, and abort instead of LOG/CHECK macros. - - R=sesse - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@62 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit fc723b212d6972af7051261754770b3f70a7dc03 -Author: snappy.mirrorbot@gmail.com -Date: Fri Feb 24 15:46:37 2012 +0000 - - Release Snappy 1.0.5. - - R=sanjay - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@61 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit dc63e0ad9693e13390ba31b00d92ecccaf7605c3 -Author: snappy.mirrorbot@gmail.com -Date: Thu Feb 23 17:00:36 2012 +0000 - - For 32-bit platforms, do not try to accelerate multiple neighboring - 32-bit loads with a 64-bit load during compression (it's not a win). 
- - The main target for this optimization is ARM, but 32-bit x86 gets - a small gain, too, although there is noise in the microbenchmarks. - It's a no-op for 64-bit x86. It does not affect decompression. - - Microbenchmark results on a Cortex-A9 1GHz, using g++ 4.6.2 (from - Ubuntu/Linaro), -O2 -DNDEBUG -Wa,-march=armv7a -mtune=cortex-a9 - -mthumb-interwork, minimum 1000 iterations: - - Benchmark Time(ns) CPU(ns) Iterations - --------------------------------------------------- - BM_ZFlat/0 1158277 1160000 1000 84.2MB/s html (23.57 %) [ +4.3%] - BM_ZFlat/1 14861782 14860000 1000 45.1MB/s urls (50.89 %) [ +1.1%] - BM_ZFlat/2 393595 390000 1000 310.5MB/s jpg (99.88 %) [ +0.0%] - BM_ZFlat/3 650583 650000 1000 138.4MB/s pdf (82.13 %) [ +3.1%] - BM_ZFlat/4 4661480 4660000 1000 83.8MB/s html4 (23.55 %) [ +4.3%] - BM_ZFlat/5 491973 490000 1000 47.9MB/s cp (48.12 %) [ +2.0%] - BM_ZFlat/6 193575 192678 1038 55.2MB/s c (42.40 %) [ +9.0%] - BM_ZFlat/7 62343 62754 3187 56.5MB/s lsp (48.37 %) [ +2.6%] - BM_ZFlat/8 17708468 17710000 1000 55.5MB/s xls (41.34 %) [ -0.3%] - BM_ZFlat/9 3755345 3760000 1000 38.6MB/s txt1 (59.81 %) [ +8.2%] - BM_ZFlat/10 3324217 3320000 1000 36.0MB/s txt2 (64.07 %) [ +4.2%] - BM_ZFlat/11 10139932 10140000 1000 40.1MB/s txt3 (57.11 %) [ +6.4%] - BM_ZFlat/12 13532109 13530000 1000 34.0MB/s txt4 (68.35 %) [ +5.0%] - BM_ZFlat/13 4690847 4690000 1000 104.4MB/s bin (18.21 %) [ +4.1%] - BM_ZFlat/14 830682 830000 1000 43.9MB/s sum (51.88 %) [ +1.2%] - BM_ZFlat/15 84784 85011 2235 47.4MB/s man (59.36 %) [ +1.1%] - BM_ZFlat/16 1293254 1290000 1000 87.7MB/s pb (23.15 %) [ +2.3%] - BM_ZFlat/17 2775155 2780000 1000 63.2MB/s gaviota (38.27 %) [+12.2%] - - Core i7 in 32-bit mode (only one run and 100 iterations, though, so noisy): - - Benchmark Time(ns) CPU(ns) Iterations - --------------------------------------------------- - BM_ZFlat/0 227582 223464 3043 437.0MB/s html (23.57 %) [ +7.4%] - BM_ZFlat/1 2982430 2918455 233 229.4MB/s urls (50.89 %) [ +2.9%] - 
BM_ZFlat/2 46967 46658 15217 2.5GB/s jpg (99.88 %) [ +0.0%] - BM_ZFlat/3 115298 114864 5833 783.2MB/s pdf (82.13 %) [ +1.5%] - BM_ZFlat/4 913440 899743 778 434.2MB/s html4 (23.55 %) [ +0.3%] - BM_ZFlat/5 110302 108571 7000 216.1MB/s cp (48.12 %) [ +0.0%] - BM_ZFlat/6 44409 43372 15909 245.2MB/s c (42.40 %) [ +0.8%] - BM_ZFlat/7 15713 15643 46667 226.9MB/s lsp (48.37 %) [ +2.7%] - BM_ZFlat/8 2625539 2602230 269 377.4MB/s xls (41.34 %) [ +1.4%] - BM_ZFlat/9 808884 811429 875 178.8MB/s txt1 (59.81 %) [ -3.9%] - BM_ZFlat/10 709532 700000 1000 170.5MB/s txt2 (64.07 %) [ +0.0%] - BM_ZFlat/11 2177682 2162162 333 188.2MB/s txt3 (57.11 %) [ -1.4%] - BM_ZFlat/12 2849640 2840000 250 161.8MB/s txt4 (68.35 %) [ -1.4%] - BM_ZFlat/13 849760 835476 778 585.8MB/s bin (18.21 %) [ +1.2%] - BM_ZFlat/14 165940 164571 4375 221.6MB/s sum (51.88 %) [ +1.4%] - BM_ZFlat/15 20939 20571 35000 196.0MB/s man (59.36 %) [ +2.1%] - BM_ZFlat/16 239209 236544 2917 478.1MB/s pb (23.15 %) [ +4.2%] - BM_ZFlat/17 616206 610000 1000 288.2MB/s gaviota (38.27 %) [ -1.6%] - - R=sanjay - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@60 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit f8829ea39d51432ba4e6a26ddaec57acea779f4c -Author: snappy.mirrorbot@gmail.com -Date: Tue Feb 21 17:02:17 2012 +0000 - - Enable the use of unaligned loads and stores for ARM-based architectures - where they are available (ARMv7 and higher). This gives a significant - speed boost on ARM, both for compression and decompression. - It should not affect x86 at all. - - There are more changes possible to speed up ARM, but it might not be - that easy to do without hurting x86 or making the code uglier. - Also, we de not try to use NEON yet. 
- - Microbenchmark results on a Cortex-A9 1GHz, using g++ 4.6.2 (from Ubuntu/Linaro), - -O2 -DNDEBUG -Wa,-march=armv7a -mtune=cortex-a9 -mthumb-interwork: - - Benchmark Time(ns) CPU(ns) Iterations - --------------------------------------------------- - BM_UFlat/0 524806 529100 378 184.6MB/s html [+33.6%] - BM_UFlat/1 5139790 5200000 100 128.8MB/s urls [+28.8%] - BM_UFlat/2 86540 84166 1901 1.4GB/s jpg [ +0.6%] - BM_UFlat/3 215351 210176 904 428.0MB/s pdf [+29.8%] - BM_UFlat/4 2144490 2100000 100 186.0MB/s html4 [+33.3%] - BM_UFlat/5 194482 190000 1000 123.5MB/s cp [+36.2%] - BM_UFlat/6 91843 90175 2107 117.9MB/s c [+38.6%] - BM_UFlat/7 28535 28426 6684 124.8MB/s lsp [+34.7%] - BM_UFlat/8 9206600 9200000 100 106.7MB/s xls [+42.4%] - BM_UFlat/9 1865273 1886792 106 76.9MB/s txt1 [+32.5%] - BM_UFlat/10 1576809 1587301 126 75.2MB/s txt2 [+32.3%] - BM_UFlat/11 4968450 4900000 100 83.1MB/s txt3 [+32.7%] - BM_UFlat/12 6673970 6700000 100 68.6MB/s txt4 [+32.8%] - BM_UFlat/13 2391470 2400000 100 203.9MB/s bin [+29.2%] - BM_UFlat/14 334601 344827 522 105.8MB/s sum [+30.6%] - BM_UFlat/15 37404 38080 5252 105.9MB/s man [+33.8%] - BM_UFlat/16 535470 540540 370 209.2MB/s pb [+31.2%] - BM_UFlat/17 1875245 1886792 106 93.2MB/s gaviota [+37.8%] - BM_UValidate/0 178425 179533 1114 543.9MB/s html [ +2.7%] - BM_UValidate/1 2100450 2000000 100 334.8MB/s urls [ +5.0%] - BM_UValidate/2 1039 1044 172413 113.3GB/s jpg [ +3.4%] - BM_UValidate/3 59423 59470 3363 1.5GB/s pdf [ +7.8%] - BM_UValidate/4 760716 766283 261 509.8MB/s html4 [ +6.5%] - BM_ZFlat/0 1204632 1204819 166 81.1MB/s html (23.57 %) [+32.8%] - BM_ZFlat/1 15656190 15600000 100 42.9MB/s urls (50.89 %) [+27.6%] - BM_ZFlat/2 403336 410677 487 294.8MB/s jpg (99.88 %) [+16.5%] - BM_ZFlat/3 664073 671140 298 134.0MB/s pdf (82.13 %) [+28.4%] - BM_ZFlat/4 4961940 4900000 100 79.7MB/s html4 (23.55 %) [+30.6%] - BM_ZFlat/5 500664 501253 399 46.8MB/s cp (48.12 %) [+33.4%] - BM_ZFlat/6 217276 215982 926 49.2MB/s c (42.40 %) [+25.0%] - 
BM_ZFlat/7 64122 65487 3054 54.2MB/s lsp (48.37 %) [+36.1%] - BM_ZFlat/8 18045730 18000000 100 54.6MB/s xls (41.34 %) [+34.4%] - BM_ZFlat/9 4051530 4000000 100 36.3MB/s txt1 (59.81 %) [+25.0%] - BM_ZFlat/10 3451800 3500000 100 34.1MB/s txt2 (64.07 %) [+25.7%] - BM_ZFlat/11 11052340 11100000 100 36.7MB/s txt3 (57.11 %) [+24.3%] - BM_ZFlat/12 14538690 14600000 100 31.5MB/s txt4 (68.35 %) [+24.7%] - BM_ZFlat/13 5041850 5000000 100 97.9MB/s bin (18.21 %) [+32.0%] - BM_ZFlat/14 908840 909090 220 40.1MB/s sum (51.88 %) [+22.2%] - BM_ZFlat/15 86921 86206 1972 46.8MB/s man (59.36 %) [+42.2%] - BM_ZFlat/16 1312315 1315789 152 86.0MB/s pb (23.15 %) [+34.5%] - BM_ZFlat/17 3173120 3200000 100 54.9MB/s gaviota (38.27%) [+28.1%] - - - The move from 64-bit to 32-bit operations for the copies also affected 32-bit x86; - positive on the decompression side, and slightly negative on the compression side - (unless that is noise; I only ran once): - - Benchmark Time(ns) CPU(ns) Iterations - ----------------------------------------------------- - BM_UFlat/0 86279 86140 7778 1.1GB/s html [ +7.5%] - BM_UFlat/1 839265 822622 778 813.9MB/s urls [ +9.4%] - BM_UFlat/2 9180 9143 87500 12.9GB/s jpg [ +1.2%] - BM_UFlat/3 35080 35000 20000 2.5GB/s pdf [+10.1%] - BM_UFlat/4 350318 345000 2000 1.1GB/s html4 [ +7.0%] - BM_UFlat/5 33808 33472 21212 701.0MB/s cp [ +9.0%] - BM_UFlat/6 15201 15214 46667 698.9MB/s c [+14.9%] - BM_UFlat/7 4652 4651 159091 762.9MB/s lsp [ +7.5%] - BM_UFlat/8 1285551 1282528 538 765.7MB/s xls [+10.7%] - BM_UFlat/9 282510 281690 2414 514.9MB/s txt1 [+13.6%] - BM_UFlat/10 243494 239286 2800 498.9MB/s txt2 [+14.4%] - BM_UFlat/11 743625 740000 1000 550.0MB/s txt3 [+14.3%] - BM_UFlat/12 999441 989717 778 464.3MB/s txt4 [+16.1%] - BM_UFlat/13 412402 410076 1707 1.2GB/s bin [ +7.3%] - BM_UFlat/14 54876 54000 10000 675.3MB/s sum [+13.0%] - BM_UFlat/15 6146 6100 100000 660.8MB/s man [+14.8%] - BM_UFlat/16 90496 90286 8750 1.2GB/s pb [ +4.0%] - BM_UFlat/17 292650 292000 2500 
602.0MB/s gaviota [+18.1%] - BM_UValidate/0 49620 49699 14286 1.9GB/s html [ +0.0%] - BM_UValidate/1 501371 500000 1000 1.3GB/s urls [ +0.0%] - BM_UValidate/2 232 227 3043478 521.5GB/s jpg [ +1.3%] - BM_UValidate/3 17250 17143 43750 5.1GB/s pdf [ -1.3%] - BM_UValidate/4 198643 200000 3500 1.9GB/s html4 [ -0.9%] - BM_ZFlat/0 227128 229415 3182 425.7MB/s html (23.57 %) [ -1.4%] - BM_ZFlat/1 2970089 2960000 250 226.2MB/s urls (50.89 %) [ -1.9%] - BM_ZFlat/2 45683 44999 15556 2.6GB/s jpg (99.88 %) [ +2.2%] - BM_ZFlat/3 114661 113136 6364 795.1MB/s pdf (82.13 %) [ -1.5%] - BM_ZFlat/4 919702 914286 875 427.2MB/s html4 (23.55%) [ -1.3%] - BM_ZFlat/5 108189 108422 6364 216.4MB/s cp (48.12 %) [ -1.2%] - BM_ZFlat/6 44525 44000 15909 241.7MB/s c (42.40 %) [ -2.9%] - BM_ZFlat/7 15973 15857 46667 223.8MB/s lsp (48.37 %) [ +0.0%] - BM_ZFlat/8 2677888 2639405 269 372.1MB/s xls (41.34 %) [ -1.4%] - BM_ZFlat/9 800715 780000 1000 186.0MB/s txt1 (59.81 %) [ -0.4%] - BM_ZFlat/10 700089 700000 1000 170.5MB/s txt2 (64.07 %) [ -2.9%] - BM_ZFlat/11 2159356 2138365 318 190.3MB/s txt3 (57.11 %) [ -0.3%] - BM_ZFlat/12 2796143 2779923 259 165.3MB/s txt4 (68.35 %) [ -1.4%] - BM_ZFlat/13 856458 835476 778 585.8MB/s bin (18.21 %) [ -0.1%] - BM_ZFlat/14 166908 166857 4375 218.6MB/s sum (51.88 %) [ -1.4%] - BM_ZFlat/15 21181 20857 35000 193.3MB/s man (59.36 %) [ -0.8%] - BM_ZFlat/16 244009 239973 2917 471.3MB/s pb (23.15 %) [ -1.4%] - BM_ZFlat/17 596362 590000 1000 297.9MB/s gaviota (38.27%) [ +0.0%] - - R=sanjay - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@59 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit f2e184f638bdc7905f26c24faaf10fc0f5d33403 -Author: snappy.mirrorbot@gmail.com -Date: Sat Feb 11 22:11:22 2012 +0000 - - Lower the size allocated in the "corrupted input" unit test from 256 MB - to 2 MB. This fixes issues with running the unit test on platforms with - little RAM (e.g. some ARM boards). 
- - Also, reactivate the 2 MB test for 64-bit platforms; there's no good - reason why it shouldn't be. - - R=sanjay - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@58 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit e750dc0f054ba74b0ce76dd2013e6728cc7a41c5 -Author: snappy.mirrorbot@gmail.com -Date: Sun Jan 8 17:55:48 2012 +0000 - - Minor refactoring to accomodate changes in Google's internal code tree. - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@57 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit d9068ee301bdf893a4d8cb7c6518eacc44c4c1f2 -Author: snappy.mirrorbot@gmail.com -Date: Wed Jan 4 13:10:46 2012 +0000 - - Fix public issue r57: Fix most warnings with -Wall, mostly signed/unsigned - warnings. There are still some in the unit test, but the main .cc file should - be clean. We haven't enabled -Wall for the default build, since the unit test - is still not clean. - - This also fixes a real bug in the open-source implementation of - ReadFileToStringOrDie(); it would not detect errors correctly. - - I had to go through some pains to avoid performance loss as the types - were changed; I think there might still be some with 32-bit if and only if LFS - is enabled (ie., size_t is 64-bit), but for regular 32-bit and 64-bit I can't - see any losses, and I've diffed the generated GCC assembler between the old and - new code without seeing any significant choices. If anything, it's ever so - slightly faster. - - This may or may not enable compression of very large blocks (>2^32 bytes) - when size_t is 64-bit, but I haven't checked, and it is still not a supported - case. - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@56 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit 0755c815197dacc77d8971ae917c86d7aa96bf8e -Author: snappy.mirrorbot@gmail.com -Date: Wed Jan 4 10:46:39 2012 +0000 - - Add a framing format description. 
We do not have any implementation of this at - the current point, but there seems to be enough of a general interest in the - topic (cf. public bug #34). - - R=csilvers,sanjay - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@55 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit d7eb2dc4133794b62cba691f9be40d1549bc32e2 -Author: snappy.mirrorbot@gmail.com -Date: Mon Dec 5 21:27:26 2011 +0000 - - Speed up decompression by moving the refill check to the end of the loop. - - This seems to work because in most of the branches, the compiler can evaluate - “ip_limit_ - ip” in a more efficient way than reloading ip_limit_ from memory - (either by already having the entire expression in a register, or reconstructing - it from “avail”, or something else). Memory loads, even from L1, are seemingly - costly in the big picture at the current decompression speeds. - - Microbenchmarks (64-bit, opt mode): - - Westmere (Intel Core i7): - - Benchmark Time(ns) CPU(ns) Iterations - -------------------------------------------- - BM_UFlat/0 74492 74491 187894 1.3GB/s html [ +5.9%] - BM_UFlat/1 712268 712263 19644 940.0MB/s urls [ +3.8%] - BM_UFlat/2 10591 10590 1000000 11.2GB/s jpg [ -6.8%] - BM_UFlat/3 29643 29643 469915 3.0GB/s pdf [ +7.9%] - BM_UFlat/4 304669 304667 45930 1.3GB/s html4 [ +4.8%] - BM_UFlat/5 28508 28507 490077 823.1MB/s cp [ +4.0%] - BM_UFlat/6 12415 12415 1000000 856.5MB/s c [ +8.6%] - BM_UFlat/7 3415 3415 4084723 1039.0MB/s lsp [+18.0%] - BM_UFlat/8 979569 979563 14261 1002.5MB/s xls [ +5.8%] - BM_UFlat/9 230150 230148 60934 630.2MB/s txt1 [ +5.2%] - BM_UFlat/10 197167 197166 71135 605.5MB/s txt2 [ +4.7%] - BM_UFlat/11 607394 607390 23041 670.1MB/s txt3 [ +5.6%] - BM_UFlat/12 808502 808496 17316 568.4MB/s txt4 [ +5.0%] - BM_UFlat/13 372791 372788 37564 1.3GB/s bin [ +3.3%] - BM_UFlat/14 44541 44541 313969 818.8MB/s sum [ +5.7%] - BM_UFlat/15 4833 4833 2898697 834.1MB/s man [ +4.8%] - BM_UFlat/16 79855 79855 175356 1.4GB/s pb [ +4.8%] - BM_UFlat/17 245845 245843 
56838 715.0MB/s gaviota [ +5.8%] - - Clovertown (Intel Core 2): - - Benchmark Time(ns) CPU(ns) Iterations - -------------------------------------------- - BM_UFlat/0 107911 107890 100000 905.1MB/s html [ +2.2%] - BM_UFlat/1 1011237 1011041 10000 662.3MB/s urls [ +2.5%] - BM_UFlat/2 26775 26770 523089 4.4GB/s jpg [ +0.0%] - BM_UFlat/3 48103 48095 290618 1.8GB/s pdf [ +3.4%] - BM_UFlat/4 437724 437644 31937 892.6MB/s html4 [ +2.1%] - BM_UFlat/5 39607 39600 358284 592.5MB/s cp [ +2.4%] - BM_UFlat/6 18227 18224 768191 583.5MB/s c [ +2.7%] - BM_UFlat/7 5171 5170 2709437 686.4MB/s lsp [ +3.9%] - BM_UFlat/8 1560291 1559989 8970 629.5MB/s xls [ +3.6%] - BM_UFlat/9 335401 335343 41731 432.5MB/s txt1 [ +3.0%] - BM_UFlat/10 287014 286963 48758 416.0MB/s txt2 [ +2.8%] - BM_UFlat/11 888522 888356 15752 458.1MB/s txt3 [ +2.9%] - BM_UFlat/12 1186600 1186378 10000 387.3MB/s txt4 [ +3.1%] - BM_UFlat/13 572295 572188 24468 855.4MB/s bin [ +2.1%] - BM_UFlat/14 64060 64049 218401 569.4MB/s sum [ +4.1%] - BM_UFlat/15 7264 7263 1916168 555.0MB/s man [ +1.4%] - BM_UFlat/16 108853 108836 100000 1039.1MB/s pb [ +1.7%] - BM_UFlat/17 364289 364223 38419 482.6MB/s gaviota [ +4.9%] - - Barcelona (AMD Opteron): - - Benchmark Time(ns) CPU(ns) Iterations - -------------------------------------------- - BM_UFlat/0 103900 103871 100000 940.2MB/s html [ +8.3%] - BM_UFlat/1 1000435 1000107 10000 669.5MB/s urls [ +6.6%] - BM_UFlat/2 24659 24652 567362 4.8GB/s jpg [ +0.1%] - BM_UFlat/3 48206 48193 291121 1.8GB/s pdf [ +5.0%] - BM_UFlat/4 421980 421850 33174 926.0MB/s html4 [ +7.3%] - BM_UFlat/5 40368 40357 346994 581.4MB/s cp [ +8.7%] - BM_UFlat/6 19836 19830 708695 536.2MB/s c [ +8.0%] - BM_UFlat/7 6100 6098 2292774 581.9MB/s lsp [ +9.0%] - BM_UFlat/8 1693093 1692514 8261 580.2MB/s xls [ +8.0%] - BM_UFlat/9 365991 365886 38225 396.4MB/s txt1 [ +7.1%] - BM_UFlat/10 311330 311238 44950 383.6MB/s txt2 [ +7.6%] - BM_UFlat/11 975037 974737 14376 417.5MB/s txt3 [ +6.9%] - BM_UFlat/12 1303558 1303175 10000 
352.6MB/s txt4 [ +7.3%] - BM_UFlat/13 517448 517290 27144 946.2MB/s bin [ +5.5%] - BM_UFlat/14 66537 66518 210352 548.3MB/s sum [ +7.5%] - BM_UFlat/15 7976 7974 1760383 505.6MB/s man [ +5.6%] - BM_UFlat/16 103121 103092 100000 1097.0MB/s pb [ +8.7%] - BM_UFlat/17 391431 391314 35733 449.2MB/s gaviota [ +6.5%] - - R=sanjay - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@54 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit 5ed51ce15fc4ff8d2f7235704eb6b0c3f762fb88 -Author: snappy.mirrorbot@gmail.com -Date: Wed Nov 23 11:14:17 2011 +0000 - - Speed up decompression by making the fast path for literals faster. - - We do the fast-path step as soon as possible; in fact, as soon as we know the - literal length. Since we usually hit the fast path, we can then skip the checks - for long literals and available input space (beyond what the fast path check - already does). - - Note that this changes the decompression Writer API; however, it does not - change the ABI, since writers are always templatized and as such never - cross compilation units. The new API is slightly more general, in that it - doesn't hard-code the value 16. Note that we also take care to check - for len <= 16 first, since the other two checks almost always succeed - (so we don't want to waste time checking for them until we have to). - - The improvements are most marked on Nehalem, but are generally positive - on other platforms as well. All microbenchmarks are 64-bit, opt. 
- - Clovertown (Core 2): - - Benchmark Time(ns) CPU(ns) Iterations - -------------------------------------------- - BM_UFlat/0 110226 110224 100000 886.0MB/s html [ +1.5%] - BM_UFlat/1 1036523 1036508 10000 646.0MB/s urls [ -0.8%] - BM_UFlat/2 26775 26775 522570 4.4GB/s jpg [ +0.0%] - BM_UFlat/3 49738 49737 280974 1.8GB/s pdf [ +0.3%] - BM_UFlat/4 446790 446792 31334 874.3MB/s html4 [ +0.8%] - BM_UFlat/5 40561 40562 350424 578.5MB/s cp [ +1.3%] - BM_UFlat/6 18722 18722 746903 568.0MB/s c [ +1.4%] - BM_UFlat/7 5373 5373 2608632 660.5MB/s lsp [ +8.3%] - BM_UFlat/8 1615716 1615718 8670 607.8MB/s xls [ +2.0%] - BM_UFlat/9 345278 345281 40481 420.1MB/s txt1 [ +1.4%] - BM_UFlat/10 294855 294855 47452 404.9MB/s txt2 [ +1.6%] - BM_UFlat/11 914263 914263 15316 445.2MB/s txt3 [ +1.1%] - BM_UFlat/12 1222694 1222691 10000 375.8MB/s txt4 [ +1.4%] - BM_UFlat/13 584495 584489 23954 837.4MB/s bin [ -0.6%] - BM_UFlat/14 66662 66662 210123 547.1MB/s sum [ +1.2%] - BM_UFlat/15 7368 7368 1881856 547.1MB/s man [ +4.0%] - BM_UFlat/16 110727 110726 100000 1021.4MB/s pb [ +2.3%] - BM_UFlat/17 382138 382141 36616 460.0MB/s gaviota [ -0.7%] - - Westmere (Core i7): - - Benchmark Time(ns) CPU(ns) Iterations - -------------------------------------------- - BM_UFlat/0 78861 78853 177703 1.2GB/s html [ +2.1%] - BM_UFlat/1 739560 739491 18912 905.4MB/s urls [ +3.4%] - BM_UFlat/2 9867 9866 1419014 12.0GB/s jpg [ +3.4%] - BM_UFlat/3 31989 31986 438385 2.7GB/s pdf [ +0.2%] - BM_UFlat/4 319406 319380 43771 1.2GB/s html4 [ +1.9%] - BM_UFlat/5 29639 29636 472862 791.7MB/s cp [ +5.2%] - BM_UFlat/6 13478 13477 1000000 789.0MB/s c [ +2.3%] - BM_UFlat/7 4030 4029 3475364 880.7MB/s lsp [ +8.7%] - BM_UFlat/8 1036585 1036492 10000 947.5MB/s xls [ +6.9%] - BM_UFlat/9 242127 242105 57838 599.1MB/s txt1 [ +3.0%] - BM_UFlat/10 206499 206480 67595 578.2MB/s txt2 [ +3.4%] - BM_UFlat/11 641635 641570 21811 634.4MB/s txt3 [ +2.4%] - BM_UFlat/12 848847 848769 16443 541.4MB/s txt4 [ +3.1%] - BM_UFlat/13 384968 384938 
36366 1.2GB/s bin [ +0.3%] - BM_UFlat/14 47106 47101 297770 774.3MB/s sum [ +4.4%] - BM_UFlat/15 5063 5063 2772202 796.2MB/s man [ +7.7%] - BM_UFlat/16 83663 83656 167697 1.3GB/s pb [ +1.8%] - BM_UFlat/17 260224 260198 53823 675.6MB/s gaviota [ -0.5%] - - Barcelona (Opteron): - - Benchmark Time(ns) CPU(ns) Iterations - -------------------------------------------- - BM_UFlat/0 112490 112457 100000 868.4MB/s html [ -0.4%] - BM_UFlat/1 1066719 1066339 10000 627.9MB/s urls [ +1.0%] - BM_UFlat/2 24679 24672 563802 4.8GB/s jpg [ +0.7%] - BM_UFlat/3 50603 50589 277285 1.7GB/s pdf [ +2.6%] - BM_UFlat/4 452982 452849 30900 862.6MB/s html4 [ -0.2%] - BM_UFlat/5 43860 43848 319554 535.1MB/s cp [ +1.2%] - BM_UFlat/6 21419 21413 653573 496.6MB/s c [ +1.0%] - BM_UFlat/7 6646 6645 2105405 534.1MB/s lsp [ +0.3%] - BM_UFlat/8 1828487 1827886 7658 537.3MB/s xls [ +2.6%] - BM_UFlat/9 391824 391714 35708 370.3MB/s txt1 [ +2.2%] - BM_UFlat/10 334913 334816 41885 356.6MB/s txt2 [ +1.7%] - BM_UFlat/11 1042062 1041674 10000 390.7MB/s txt3 [ +1.1%] - BM_UFlat/12 1398902 1398456 10000 328.6MB/s txt4 [ +1.7%] - BM_UFlat/13 545706 545530 25669 897.2MB/s bin [ -0.4%] - BM_UFlat/14 71512 71505 196035 510.0MB/s sum [ +1.4%] - BM_UFlat/15 8422 8421 1665036 478.7MB/s man [ +2.6%] - BM_UFlat/16 112053 112048 100000 1009.3MB/s pb [ -0.4%] - BM_UFlat/17 416723 416713 33612 421.8MB/s gaviota [ -2.0%] - - R=sanjay - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@53 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit 0c1b9c3904430f5b399bd057d76de4bc36b7a123 -Author: snappy.mirrorbot@gmail.com -Date: Tue Nov 8 14:46:39 2011 +0000 - - Fix public issue #53: Update the README to the API we actually open-sourced - with. 
- - R=sanjay - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@52 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit b61134bc0a6a904b41522b4e5c9e80874c730cef -Author: snappy.mirrorbot@gmail.com -Date: Wed Oct 5 12:27:12 2011 +0000 - - In the format description, use a clearer example to emphasize that varints are - stored in little-endian. Patch from Christian von Roques. - - R=csilvers - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@51 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit 21a2e4f55758e759302cd84ad0f3580affcba7d9 -Author: snappy.mirrorbot@gmail.com -Date: Thu Sep 15 19:34:06 2011 +0000 - - Release Snappy 1.0.4. - - R=sanjay - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@50 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit e2e303286813c759c5b1cdb46dad63c494f0a061 -Author: snappy.mirrorbot@gmail.com -Date: Thu Sep 15 09:50:05 2011 +0000 - - Fix public issue #50: Include generic byteswap macros. - Also include Solaris 10 and FreeBSD versions. - - R=csilvers - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@49 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit 593002da3c051f4721312869f816b41485bad3b7 -Author: snappy.mirrorbot@gmail.com -Date: Wed Aug 10 18:57:27 2011 +0000 - - Partially fix public issue 50: Remove an extra comma from the end of some - enum declarations, as it seems the Sun compiler does not like it. - - Based on patch by Travis Vitek. - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@48 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit f1063a5dc43891eed37f0586bfea57b84dddd756 -Author: snappy.mirrorbot@gmail.com -Date: Wed Aug 10 18:44:16 2011 +0000 - - Use the right #ifdef test for sys/mman.h. - - Based on patch by Travis Vitek. - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@47 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit 41c827a2fa9ce048202d941187f211180feadde4 -Author: snappy.mirrorbot@gmail.com -Date: Wed Aug 10 01:22:09 2011 +0000 - - Fix public issue #47: Small comment cleanups in the unit test. 
- - Originally based on a patch by Patrick Pelletier. - - R=sanjay - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@46 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit 59aeffa6049b5c2a3a467e7602c1f93630b870e7 -Author: snappy.mirrorbot@gmail.com -Date: Wed Aug 10 01:14:43 2011 +0000 - - Fix public issue #46: Format description said "3-byte offset" - instead of "4-byte offset" for the longest copies. - - Also fix an inconsistency in the heading for section 2.2.3. - Both patches by Patrick Pelletier. - - R=csilvers - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@45 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit 57e7cd72559cb022ef32856f2252a4c4585e562e -Author: snappy.mirrorbot@gmail.com -Date: Tue Jun 28 11:40:25 2011 +0000 - - Fix public issue #44: Make the definition and declaration of CompressFragment - identical, even regarding cv-qualifiers. - - This is required to work around a bug in the Solaris Studio C++ compiler - (it does not properly disregard cv-qualifiers when doing name mangling). - - R=sanjay - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@44 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit 13c4a449a8ea22139c9aa441e8024eebc9dbdf6e -Author: snappy.mirrorbot@gmail.com -Date: Sat Jun 4 10:19:05 2011 +0000 - - Correct an inaccuracy in the Snappy format description. - (I stumbled into this when changing the way we decompress literals.) - - R=csilvers - - Revision created by MOE tool push_codebase. - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@43 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit f5406737403119e1483a71d2084d17728663a114 -Author: snappy.mirrorbot@gmail.com -Date: Fri Jun 3 20:53:06 2011 +0000 - - Speed up decompression by removing a fast-path attempt. - - Whenever we try to enter a copy fast-path, there is a certain cost in checking - that all the preconditions are in place, but it's normally offset by the fact - that we can usually take the cheaper path. 
However, in a certain path we've - already established that "avail < literal_length", which usually means that - either the available space is small, or the literal is big. Both will disqualify - us from taking the fast path, and thus we take the hit from the precondition - checking without gaining much from having a fast path. Thus, simply don't try - the fast path in this situation -- we're already on a slow path anyway - (one where we need to refill more data from the reader). - - I'm a bit surprised at how much this gained; it could be that this path is - more common than I thought, or that the simpler structure somehow makes the - compiler happier. I haven't looked at the assembler, but it's a win across - the board on both Core 2, Core i7 and Opteron, at least for the cases we - typically care about. The gains seem to be the largest on Core i7, though. - Results from my Core i7 workstation: - - - Benchmark Time(ns) CPU(ns) Iterations - --------------------------------------------------- - BM_UFlat/0 73337 73091 190996 1.3GB/s html [ +1.7%] - BM_UFlat/1 696379 693501 20173 965.5MB/s urls [ +2.7%] - BM_UFlat/2 9765 9734 1472135 12.1GB/s jpg [ +0.7%] - BM_UFlat/3 29720 29621 472973 3.0GB/s pdf [ +1.8%] - BM_UFlat/4 294636 293834 47782 1.3GB/s html4 [ +2.3%] - BM_UFlat/5 28399 28320 494700 828.5MB/s cp [ +3.5%] - BM_UFlat/6 12795 12760 1000000 833.3MB/s c [ +1.2%] - BM_UFlat/7 3984 3973 3526448 893.2MB/s lsp [ +5.7%] - BM_UFlat/8 991996 989322 14141 992.6MB/s xls [ +3.3%] - BM_UFlat/9 228620 227835 61404 636.6MB/s txt1 [ +4.0%] - BM_UFlat/10 197114 196494 72165 607.5MB/s txt2 [ +3.5%] - BM_UFlat/11 605240 603437 23217 674.4MB/s txt3 [ +3.7%] - BM_UFlat/12 804157 802016 17456 573.0MB/s txt4 [ +3.9%] - BM_UFlat/13 347860 346998 40346 1.4GB/s bin [ +1.2%] - BM_UFlat/14 44684 44559 315315 818.4MB/s sum [ +2.3%] - BM_UFlat/15 5120 5106 2739726 789.4MB/s man [ +3.3%] - BM_UFlat/16 76591 76355 183486 1.4GB/s pb [ +2.8%] - BM_UFlat/17 238564 237828 58824 739.1MB/s 
gaviota [ +1.6%] - BM_UValidate/0 42194 42060 333333 2.3GB/s html [ -0.1%] - BM_UValidate/1 433182 432005 32407 1.5GB/s urls [ -0.1%] - BM_UValidate/2 197 196 71428571 603.3GB/s jpg [ +0.5%] - BM_UValidate/3 14494 14462 972222 6.1GB/s pdf [ +0.5%] - BM_UValidate/4 168444 167836 83832 2.3GB/s html4 [ +0.1%] - - R=jeff - - Revision created by MOE tool push_codebase. - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@42 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit 197f3ee9f9397e98c9abf07f9da875fbcb725dba -Author: snappy.mirrorbot@gmail.com -Date: Fri Jun 3 20:47:14 2011 +0000 - - Speed up decompression by not needing a lookup table for literal items. - - Looking up into and decoding the values from char_table has long shown up as a - hotspot in the decompressor. While it turns out that it's hard to make a more - efficient decoder for the copy ops, the literals are simple enough that we can - decode them without needing a table lookup. (This means that 1/4 of the table - is now unused, although that in itself doesn't buy us anything.) - - The gains are small, but definitely present; some tests win as much as 10%, - but 1-4% is more typical. These results are from Core i7, in 64-bit mode; - Core 2 and Opteron show similar results. (I've run with more iterations - than unusual to make sure the smaller gains don't drown entirely in noise.) 
- - Benchmark Time(ns) CPU(ns) Iterations - --------------------------------------------------- - BM_UFlat/0 74665 74428 182055 1.3GB/s html [ +3.1%] - BM_UFlat/1 714106 711997 19663 940.4MB/s urls [ +4.4%] - BM_UFlat/2 9820 9789 1427115 12.1GB/s jpg [ -1.2%] - BM_UFlat/3 30461 30380 465116 2.9GB/s pdf [ +0.8%] - BM_UFlat/4 301445 300568 46512 1.3GB/s html4 [ +2.2%] - BM_UFlat/5 29338 29263 479452 801.8MB/s cp [ +1.6%] - BM_UFlat/6 13004 12970 1000000 819.9MB/s c [ +2.1%] - BM_UFlat/7 4180 4168 3349282 851.4MB/s lsp [ +1.3%] - BM_UFlat/8 1026149 1024000 10000 959.0MB/s xls [+10.7%] - BM_UFlat/9 237441 236830 59072 612.4MB/s txt1 [ +0.3%] - BM_UFlat/10 203966 203298 69307 587.2MB/s txt2 [ +0.8%] - BM_UFlat/11 627230 625000 22400 651.2MB/s txt3 [ +0.7%] - BM_UFlat/12 836188 833979 16787 551.0MB/s txt4 [ +1.3%] - BM_UFlat/13 351904 350750 39886 1.4GB/s bin [ +3.8%] - BM_UFlat/14 45685 45562 308370 800.4MB/s sum [ +5.9%] - BM_UFlat/15 5286 5270 2656546 764.9MB/s man [ +1.5%] - BM_UFlat/16 78774 78544 178117 1.4GB/s pb [ +4.3%] - BM_UFlat/17 242270 241345 58091 728.3MB/s gaviota [ +1.2%] - BM_UValidate/0 42149 42000 333333 2.3GB/s html [ -3.0%] - BM_UValidate/1 432741 431303 32483 1.5GB/s urls [ +7.8%] - BM_UValidate/2 198 197 71428571 600.7GB/s jpg [+16.8%] - BM_UValidate/3 14560 14521 965517 6.1GB/s pdf [ -4.1%] - BM_UValidate/4 169065 168671 83832 2.3GB/s html4 [ -2.9%] - - R=jeff - - Revision created by MOE tool push_codebase. - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@41 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit 8efa2639e885ac467e7b11c662975c5844019fb9 -Author: snappy.mirrorbot@gmail.com -Date: Thu Jun 2 22:57:41 2011 +0000 - - Release Snappy 1.0.3. 
- - - git-svn-id: https://snappy.googlecode.com/svn/trunk@40 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit 2e12124bd87f39296709decc65195fa5bfced538 -Author: snappy.mirrorbot@gmail.com -Date: Thu Jun 2 18:06:54 2011 +0000 - - Remove an unneeded goto in the decompressor; it turns out that the - state of ip_ after decompression (or attempted decompression) is - completely irrelevant, so we don't need the trailer. - - Performance is, as expected, mostly flat -- there's a curious ~3-5% - loss in the "lsp" test, but that test case is so short it is hard to say - anything definitive about why (most likely, it's some sort of - unrelated effect). - - R=jeff - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@39 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit c266bbf32103f8ed4a83e2272ed3d8828d5b8b34 -Author: snappy.mirrorbot@gmail.com -Date: Thu Jun 2 17:59:40 2011 +0000 - - Speed up decompression by caching ip_. - - It is seemingly hard for the compiler to understand that ip_, the current input - pointer into the compressed data stream, can not alias on anything else, and - thus using it directly will incur memory traffic as it cannot be kept in a - register. The code already knew about this and cached it into a local - variable, but since Step() only decoded one tag, it had to move ip_ back into - place between every tag. This seems to have cost us a significant amount of - performance, so changing Step() into a function that decodes as much as it can - before it saves ip_ back and returns. (Note that Step() was already inlined, - so it is not the manual inlining that buys the performance here.) - - The wins are about 3-6% for Core 2, 6-13% on Core i7 and 5-12% on Opteron - (for plain array-to-array decompression, in 64-bit opt mode). 
- - There is a tiny difference in the behavior here; if an invalid literal is - encountered (ie., the writer refuses the Append() operation), ip_ will now - point to the byte past the tag byte, instead of where the literal was - originally thought to end. However, we don't use ip_ for anything after - DecompressAllTags() has returned, so this should not change external behavior - in any way. - - Microbenchmark results for Core i7, 64-bit (Opteron results are similar): - - Benchmark Time(ns) CPU(ns) Iterations - --------------------------------------------------- - BM_UFlat/0 79134 79110 8835 1.2GB/s html [ +6.2%] - BM_UFlat/1 786126 786096 891 851.8MB/s urls [+10.0%] - BM_UFlat/2 9948 9948 69125 11.9GB/s jpg [ -1.3%] - BM_UFlat/3 31999 31998 21898 2.7GB/s pdf [ +6.5%] - BM_UFlat/4 318909 318829 2204 1.2GB/s html4 [ +6.5%] - BM_UFlat/5 31384 31390 22363 747.5MB/s cp [ +9.2%] - BM_UFlat/6 14037 14034 49858 757.7MB/s c [+10.6%] - BM_UFlat/7 4612 4612 151395 769.5MB/s lsp [ +9.5%] - BM_UFlat/8 1203174 1203007 582 816.3MB/s xls [+19.3%] - BM_UFlat/9 253869 253955 2757 571.1MB/s txt1 [+11.4%] - BM_UFlat/10 219292 219290 3194 544.4MB/s txt2 [+12.1%] - BM_UFlat/11 672135 672131 1000 605.5MB/s txt3 [+11.2%] - BM_UFlat/12 902512 902492 776 509.2MB/s txt4 [+12.5%] - BM_UFlat/13 372110 371998 1881 1.3GB/s bin [ +5.8%] - BM_UFlat/14 50407 50407 10000 723.5MB/s sum [+13.5%] - BM_UFlat/15 5699 5701 100000 707.2MB/s man [+12.4%] - BM_UFlat/16 83448 83424 8383 1.3GB/s pb [ +5.7%] - BM_UFlat/17 256958 256963 2723 684.1MB/s gaviota [ +7.9%] - BM_UValidate/0 42795 42796 16351 2.2GB/s html [+25.8%] - BM_UValidate/1 490672 490622 1427 1.3GB/s urls [+22.7%] - BM_UValidate/2 237 237 2950297 499.0GB/s jpg [+24.9%] - BM_UValidate/3 14610 14611 47901 6.0GB/s pdf [+26.8%] - BM_UValidate/4 171973 171990 4071 2.2GB/s html4 [+25.7%] - - - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@38 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit d0ee043bc50c62c5b5ff3da044f0b5567257407d -Author: 
snappy.mirrorbot@gmail.com -Date: Tue May 17 08:48:25 2011 +0000 - - Fix the numbering of the headlines in the Snappy format description. - - R=csilvers - DELTA=4 (0 added, 0 deleted, 4 changed) - - - Revision created by MOE tool push_codebase. - MOE_MIGRATION=1906 - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@37 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit 6c7053871fbdb459c9c14287a138d7f82d6d84a1 -Author: snappy.mirrorbot@gmail.com -Date: Mon May 16 08:59:18 2011 +0000 - - Fix public issue #32: Add compressed format documentation for Snappy. - This text is new, but an earlier version from Zeev Tarantov was used - as reference. - - R=csilvers - DELTA=112 (111 added, 0 deleted, 1 changed) - - - Revision created by MOE tool push_codebase. - MOE_MIGRATION=1867 - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@36 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit a1f9f9973d127992f341d442969c86fd9a0847c9 -Author: snappy.mirrorbot@gmail.com -Date: Mon May 9 21:29:02 2011 +0000 - - Fix public issue #39: Pick out the median runs based on CPU time, - not real time. Also, use nth_element instead of sort, since we - only need one element. - - R=csilvers - DELTA=5 (3 added, 0 deleted, 2 changed) - - - Revision created by MOE tool push_codebase. - MOE_MIGRATION=1799 - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@35 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit f7b105683c074cdf233740089e245e43f63e7e55 -Author: snappy.mirrorbot@gmail.com -Date: Mon May 9 21:28:45 2011 +0000 - - Fix public issue #38: Make the microbenchmark framework handle - properly cases where gettimeofday() can stand still and return the same - result twice (as sometimes on GNU/Hurd) or go backwards - (as when the user adjusts the clock). We avoid a division-by-zero, - and put a lower bound on the number of iterations -- the same - amount as we use to calibrate. 
- - We should probably use CLOCK_MONOTONIC for platforms that support - it, to be robust against clock adjustments; we already use Windows' - monotonic timers. However, that's for a later changelist. - - R=csilvers - DELTA=7 (5 added, 0 deleted, 2 changed) - - - Revision created by MOE tool push_codebase. - MOE_MIGRATION=1798 - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@34 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit d8d481427a05b88cdb0810c29bf400153595c423 -Author: snappy.mirrorbot@gmail.com -Date: Tue May 3 23:22:52 2011 +0000 - - Fix public issue #37: Only link snappy_unittest against -lz and other autodetected - libraries, not libsnappy.so (which doesn't need any such dependency). - - R=csilvers - DELTA=20 (14 added, 0 deleted, 6 changed) - - - Revision created by MOE tool push_codebase. - MOE_MIGRATION=1710 - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@33 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit bcecf195c0aeb2c98144d3d54b4d8d228774f50d -Author: snappy.mirrorbot@gmail.com -Date: Tue May 3 23:22:33 2011 +0000 - - Release Snappy 1.0.2, to get the license change and various other fixes into - a release. - - R=csilvers - DELTA=239 (236 added, 0 deleted, 3 changed) - - - Revision created by MOE tool push_codebase. - MOE_MIGRATION=1709 - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@32 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit 84d9f642025cda672dda0d94a8008f094500aaa6 -Author: snappy.mirrorbot@gmail.com -Date: Tue Apr 26 12:34:55 2011 +0000 - - Fix public issue #30: Stop using gettimeofday() altogether on Win32, - as MSVC doesn't include it. Replace with QueryPerformanceCounter(), - which is monotonic and probably reasonably high-resolution. - (Some machines have traditionally had bugs in QPC, but they should - be relatively rare these days, and there's really no much better - alternative that I know of.) - - R=csilvers - DELTA=74 (55 added, 19 deleted, 0 changed) - - - Revision created by MOE tool push_codebase. 
- MOE_MIGRATION=1556 - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@31 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit 3d8e71df8d30f980d71d4c784ebfc5ff62d5b0cb -Author: snappy.mirrorbot@gmail.com -Date: Tue Apr 26 12:34:37 2011 +0000 - - Fix public issue #31: Don't reset PATH in autogen.sh; instead, do the trickery - we need for our own build system internally. - - R=csilvers - DELTA=16 (13 added, 1 deleted, 2 changed) - - - Revision created by MOE tool push_codebase. - MOE_MIGRATION=1555 - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@30 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit 73987351de54c88e2fc3f5dcdeceb47708df3585 -Author: snappy.mirrorbot@gmail.com -Date: Fri Apr 15 22:55:56 2011 +0000 - - When including <windows.h>, define WIN32_LEAN_AND_MEAN first, - so we won't pull in macro definitions of things like min() and max(), - which can conflict with <algorithm>. - - R=csilvers - DELTA=1 (1 added, 0 deleted, 0 changed) - - - Revision created by MOE tool push_codebase. - MOE_MIGRATION=1485 - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@29 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit fb7e0eade471a20b009720a84fea0af1552791d5 -Author: snappy.mirrorbot@gmail.com -Date: Mon Apr 11 09:07:01 2011 +0000 - - Fix public issue #29: Write CPU timing code for Windows, based on GetProcessTimes() - instead of getrusage(). - - I thought I'd already committed this patch, so that the 1.0.1 release already - would have a Windows-compatible snappy_unittest, but I'd seemingly deleted it - instead, so this is a reconstruction. - - R=csilvers - DELTA=43 (39 added, 3 deleted, 1 changed) - - - Revision created by MOE tool push_codebase. - MOE_MIGRATION=1295 - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@28 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit c67fa0c755a329000da5546fff79089d62ac2f82 -Author: snappy.mirrorbot@gmail.com -Date: Fri Apr 8 09:51:53 2011 +0000 - - Include C bindings of Snappy, contributed by Martin Gieseking. 
- - I've made a few changes since Martin's version; mostly style nits, but also - a semantic change -- most functions that return bool in the C++ version now - return an enum, to better match typical C (and zlib) semantics. - - I've kept the copyright notice, since Martin is obviously the author here; - he has signed the contributor license agreement, though, so this should not - hinder Google's use in the future. - - We'll need to update the libtool version number to match the added interface, - but as of http://www.gnu.org/software/libtool/manual/html_node/Updating-version-info.html - I'm going to wait until public release. - - R=csilvers - DELTA=238 (233 added, 0 deleted, 5 changed) - - - Revision created by MOE tool push_codebase. - MOE_MIGRATION=1294 - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@27 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit 56be85cb9ae06f2e92180ae2575bdd10c012ab73 -Author: snappy.mirrorbot@gmail.com -Date: Thu Apr 7 16:36:43 2011 +0000 - - Replace geo.protodata with a newer version. - - The data compresses/decompresses slightly faster than the old data, and has - similar density. - - R=lookingbill - DELTA=1 (0 added, 0 deleted, 1 changed) - - - Revision created by MOE tool push_codebase. - MOE_MIGRATION=1288 - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@26 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit 3dd93f3ec74df54a37f68bffabb058ac757bbe72 -Author: snappy.mirrorbot@gmail.com -Date: Wed Mar 30 20:27:53 2011 +0000 - - Fix public issue #27: Add HAVE_CONFIG_H tests around the config.h - inclusion in snappy-stubs-internal.h, which eases compiling outside the - automake/autoconf framework. - - R=csilvers - DELTA=5 (4 added, 1 deleted, 0 changed) - - - Revision created by MOE tool push_codebase. 
- MOE_MIGRATION=1152 - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@25 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit f67bcaa61006da8b325a7ed9909a782590971815 -Author: snappy.mirrorbot@gmail.com -Date: Wed Mar 30 20:27:39 2011 +0000 - - Fix public issue #26: Take memory allocation and reallocation entirely out of the - Measure() loop. This gives all algorithms a small speed boost, except Snappy which - already didn't do reallocation (so the measurements were slightly biased in its - favor). - - R=csilvers - DELTA=92 (69 added, 9 deleted, 14 changed) - - - Revision created by MOE tool push_codebase. - MOE_MIGRATION=1151 - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@24 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit cc333c1c5cc4eabceceb9848ff3cac6c604ecbc6 -Author: snappy.mirrorbot@gmail.com -Date: Wed Mar 30 20:25:09 2011 +0000 - - Renamed "namespace zippy" to "namespace snappy" to reduce - the differences from the opensource code. Will make it easier - in the future to mix-and-match third-party code that uses - snappy with google code. - - Currently, csearch shows that the only external user of - "namespace zippy" is some bigtable code that accesses - a TEST variable, which is temporarily kept in the zippy - namespace. - - R=sesse - DELTA=123 (18 added, 3 deleted, 102 changed) - - - Revision created by MOE tool push_codebase. - MOE_MIGRATION=1150 - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@23 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit f19fb07e6dc79d6857e37df572dba25ff30fc8f3 -Author: snappy.mirrorbot@gmail.com -Date: Mon Mar 28 22:17:04 2011 +0000 - - Put back the final few lines of what was truncated during the - license header change. - - R=csilvers - DELTA=5 (4 added, 0 deleted, 1 changed) - - - Revision created by MOE tool push_codebase. 
- MOE_MIGRATION=1094 - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@22 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit 7e8ca8f8315fc2ecb4eea19db695039ab2ca43a0 -Author: snappy.mirrorbot@gmail.com -Date: Sat Mar 26 02:34:34 2011 +0000 - - Change on 2011-03-25 19:18:00-07:00 by sesse - - Replace the Apache 2.0 license header by the BSD-type license header; - somehow a lot of the files were missed in the last round. - - R=dannyb,csilvers - DELTA=147 (74 added, 2 deleted, 71 changed) - - Change on 2011-03-25 19:25:07-07:00 by sesse - - Unbreak the build; the relicensing removed a bit too much (only comments - were intended, but I also accidentially removed some of the top lines of - the actual source). - - - - Revision created by MOE tool push_codebase. - MOE_MIGRATION=1072 - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@21 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit b4bbc1041b35d844ec26fbae25f2864995361fd8 -Author: snappy.mirrorbot@gmail.com -Date: Fri Mar 25 16:14:41 2011 +0000 - - Change Snappy from the Apache 2.0 to a BSD-type license. - - R=dannyb - DELTA=328 (80 added, 184 deleted, 64 changed) - - - Revision created by MOE tool push_codebase. - MOE_MIGRATION=1061 - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@20 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit c47640c510eb11cf8913edfa34f667bceb3a4401 -Author: snappy.mirrorbot@gmail.com -Date: Fri Mar 25 00:39:01 2011 +0000 - - Release Snappy 1.0.1, to soup up all the various small changes - that have been made since release. - - R=csilvers - DELTA=266 (260 added, 0 deleted, 6 changed) - - - Revision created by MOE tool push_codebase. 
- MOE_MIGRATION=1057 - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@19 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit b1dc1f643eaff897a5ce135f525799b99687b118 -Author: snappy.mirrorbot@gmail.com -Date: Thu Mar 24 19:15:54 2011 +0000 - - Fix a microbenchmark crash on mingw32; seemingly %lld is not universally - supported on Windows, and %I64d is recommended instead. - - R=csilvers - DELTA=6 (5 added, 0 deleted, 1 changed) - - - Revision created by MOE tool push_codebase. - MOE_MIGRATION=1034 - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@18 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit 98004ca9afc62a3279dfe9d9a359083f61db437f -Author: snappy.mirrorbot@gmail.com -Date: Thu Mar 24 19:15:27 2011 +0000 - - Fix public issue #19: Fix unit test when Google Test is installed but the - gflags package isn't (Google Test is not properly initialized). - - Patch by Martin Gieseking. - - R=csilvers - DELTA=2 (1 added, 0 deleted, 1 changed) - - - Revision created by MOE tool push_codebase. - MOE_MIGRATION=1033 - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@17 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit 444a6c5f72d6f8d8f7213a5bcc08b26606eb9934 -Author: snappy.mirrorbot@gmail.com -Date: Thu Mar 24 19:13:57 2011 +0000 - - Make the unit test work on systems without mmap(). This is required for, - among others, Windows support. For Windows in specific, we could have used - CreateFileMapping/MapViewOfFile, but this should at least get us a bit closer - to compiling, and is of course also relevant for embedded systems with no MMU. - - (Part 2/2) - - R=csilvers - DELTA=15 (12 added, 3 deleted, 0 changed) - - - Revision created by MOE tool push_codebase. - MOE_MIGRATION=1032 - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@16 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit 2e182e9bb840737f9cd8817e859dc17a82f2c16b -Author: snappy.mirrorbot@gmail.com -Date: Thu Mar 24 19:12:27 2011 +0000 - - Make the unit test work on systems without mmap(). 
This is required for, - among others, Windows support. For Windows in specific, we could have used - CreateFileMapping/MapViewOfFile, but this should at least get us a bit closer - to compiling, and is of course also relevant for embedded systems with no MMU. - - (Part 1/2) - - R=csilvers - DELTA=9 (8 added, 0 deleted, 1 changed) - - - Revision created by MOE tool push_codebase. - MOE_MIGRATION=1031 - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@15 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit 48662cbb7f81533977334629790d346220084527 -Author: snappy.mirrorbot@gmail.com -Date: Wed Mar 23 23:17:36 2011 +0000 - - Fix public issue #12: Don't keep autogenerated auto* files in Subversion; - it causes problems with others sending patches etc.. - - We can't get this 100% hermetic anyhow, due to files like lt~obsolete.m4, - so we can just as well go cleanly in the other direction. - - R=csilvers - DELTA=21038 (0 added, 21036 deleted, 2 changed) - - - Revision created by MOE tool push_codebase. - MOE_MIGRATION=1012 - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@14 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit 9e4717a586149c9538b353400312bab5ab5458c4 -Author: snappy.mirrorbot@gmail.com -Date: Wed Mar 23 17:50:49 2011 +0000 - - Fix public issue tracker bug #3: Call AC_SUBST([LIBTOOL_DEPS]), or the rule - to rebuild libtool in Makefile.am won't work. - - R=csilvers - DELTA=1 (1 added, 0 deleted, 0 changed) - - - Revision created by MOE tool push_codebase. - MOE_MIGRATION=997 - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@13 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit 519c822a34a91a0c0eb32d98e9686ee7d9cd6651 -Author: snappy.mirrorbot@gmail.com -Date: Wed Mar 23 11:16:39 2011 +0000 - - Fix public issue #10: Don't add GTEST_CPPFLAGS to snappy_unittest_CXXFLAGS; - it's not needed (CPPFLAGS are always included when compiling). - - R=csilvers - DELTA=1 (0 added, 1 deleted, 0 changed) - - - Revision created by MOE tool push_codebase. 
- MOE_MIGRATION=994 - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@12 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit ea6b936378583cba730c33c8a53776edc1782208 -Author: snappy.mirrorbot@gmail.com -Date: Wed Mar 23 11:16:18 2011 +0000 - - Fix public issue #9: Add -Wall -Werror to automake flags. - (This concerns automake itself, not the C++ compiler.) - - R=csilvers - DELTA=4 (3 added, 0 deleted, 1 changed) - - - Revision created by MOE tool push_codebase. - MOE_MIGRATION=993 - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@11 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit e3ca06af253094b1c3a8eae508cd97accf077535 -Author: snappy.mirrorbot@gmail.com -Date: Wed Mar 23 11:13:37 2011 +0000 - - Fix a typo in the Snappy README file. - - R=csilvers - DELTA=1 (0 added, 0 deleted, 1 changed) - - - Revision created by MOE tool push_codebase. - MOE_MIGRATION=992 - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@10 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit 39d27bea23873abaa663e884261386b17b058f20 -Author: snappy.mirrorbot@gmail.com -Date: Wed Mar 23 11:13:13 2011 +0000 - - Fix public issue #6: Add a --with-gflags for disabling gflags autodetection - and using a manually given setting (use/don't use) instead. - - R=csilvers - DELTA=16 (13 added, 0 deleted, 3 changed) - - - Revision created by MOE tool push_codebase. - MOE_MIGRATION=991 - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@9 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit 60add43d99c1c31aeecd895cb555ad6f6520608e -Author: snappy.mirrorbot@gmail.com -Date: Wed Mar 23 11:12:44 2011 +0000 - - Fix public issue #5: Replace the EXTRA_LIBSNAPPY_LDFLAGS setup with something - slightly more standard, that also doesn't leak libtool command-line into - configure.ac. - - R=csilvers - DELTA=7 (0 added, 4 deleted, 3 changed) - - - Revision created by MOE tool push_codebase. 
- MOE_MIGRATION=990 - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@8 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit a8dd1700879ad646106742aa0e9c3a48dc07b01d -Author: snappy.mirrorbot@gmail.com -Date: Wed Mar 23 11:12:22 2011 +0000 - - Fix public issue #4: Properly quote all macro arguments in configure.ac. - - R=csilvers - DELTA=16 (0 added, 0 deleted, 16 changed) - - - Revision created by MOE tool push_codebase. - MOE_MIGRATION=989 - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@7 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit 79752dd7033658e28dc894de55012bdf2c9afca3 -Author: snappy.mirrorbot@gmail.com -Date: Wed Mar 23 11:11:54 2011 +0000 - - Fix public issue #7: Don't use internal variables named ac_*, as those belong - to autoconf's namespace. - - R=csilvers - DELTA=6 (0 added, 0 deleted, 6 changed) - - - Revision created by MOE tool push_codebase. - MOE_MIGRATION=988 - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@6 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit 46e39fb20c297129494b969ac4ea64fcd04b4fa0 -Author: snappy.mirrorbot@gmail.com -Date: Wed Mar 23 11:11:09 2011 +0000 - - Add missing licensing headers to a few files. (Part 2/2.) - - R=csilvers - DELTA=12 (12 added, 0 deleted, 0 changed) - - - Revision created by MOE tool push_codebase. - MOE_MIGRATION=987 - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@5 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit 3e764216fc8edaafca480443b90e55c14eaae2c2 -Author: snappy.mirrorbot@gmail.com -Date: Wed Mar 23 11:10:39 2011 +0000 - - Add mising licensing headers to a few files. (Part 1/2.) - - R=csilvers - DELTA=24 (24 added, 0 deleted, 0 changed) - - - Revision created by MOE tool push_codebase. 
- MOE_MIGRATION=986 - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@4 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit 9a59f183c8ffec62dcdabd3499d0d515e44e4ef0 -Author: snappy.mirrorbot@gmail.com -Date: Wed Mar 23 11:10:04 2011 +0000 - - Use the correct license file for the Apache 2.0 license; - spotted by Florian Weimer. - - R=csilvers - DELTA=202 (174 added, 0 deleted, 28 changed) - - - Revision created by MOE tool push_codebase. - MOE_MIGRATION=985 - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@3 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit 28a64402392c791905d6e1384ea1b48a5cb0b281 -Author: snappy.mirrorbot@gmail.com -Date: Fri Mar 18 17:14:15 2011 +0000 - - Revision created by MOE tool push_codebase. - MOE_MIGRATION= - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@2 03e5f5b5-db94-4691-08a0-1a8bf15f6143 - -commit 7c3c6077b72b4ae2237267a20f640b55e9a90569 -Author: sesse@google.com -Date: Fri Mar 18 17:13:52 2011 +0000 - - Create trunk directory. - - - git-svn-id: https://snappy.googlecode.com/svn/trunk@1 03e5f5b5-db94-4691-08a0-1a8bf15f6143 diff --git a/other-licenses/snappy/src/NEWS b/other-licenses/snappy/src/NEWS index 4eb7a1d1a92a..98048dbdd8c5 100644 --- a/other-licenses/snappy/src/NEWS +++ b/other-licenses/snappy/src/NEWS @@ -1,3 +1,51 @@ +Snappy v1.1.8, January 15th 2020: + + * Small performance improvements. + + * Removed snappy::string alias for std::string. + + * Improved CMake configuration. + +Snappy v1.1.7, August 24th 2017: + + * Improved CMake build support for 64-bit Linux distributions. + + * MSVC builds now use MSVC-specific intrinsics that map to clzll. + + * ARM64 (AArch64) builds use the code paths optimized for 64-bit processors. + +Snappy v1.1.6, July 12th 2017: + +This is a re-release of v1.1.5 with proper SONAME / SOVERSION values. + +Snappy v1.1.5, June 28th 2017: + +This release has broken SONAME / SOVERSION values. Users of snappy as a shared +library should avoid 1.1.5 and use 1.1.6 instead. 
SONAME / SOVERSION errors will +manifest as the dynamic library loader complaining that it cannot find snappy's +shared library file (libsnappy.so / libsnappy.dylib), or that the library it +found does not have the required version. 1.1.6 has the same code as 1.1.5, but +carries build configuration fixes for the issues above. + + * Add CMake build support. The autoconf build support is now deprecated, and + will be removed in the next release. + + * Add AppVeyor configuration, for Windows CI coverage. + + * Small performance improvement on little-endian PowerPC. + + * Small performance improvement on LLVM with position-independent executables. + + * Fix a few issues with various build environments. + +Snappy v1.1.4, January 25th 2017: + + * Fix a 1% performance regression when snappy is used in PIE executables. + + * Improve compression performance by 5%. + + * Improve decompression performance by 20%. + Snappy v1.1.3, July 6th 2015: This is the first release to be done from GitHub, which means that diff --git a/other-licenses/snappy/src/README b/other-licenses/snappy/src/README.md similarity index 86% rename from other-licenses/snappy/src/README rename to other-licenses/snappy/src/README.md index c60dab9a1c2e..cef4017492ea 100644 --- a/other-licenses/snappy/src/README +++ b/other-licenses/snappy/src/README.md @@ -34,7 +34,7 @@ Snappy is intended to be fast. On a single core of a Core i7 processor in 64-bit mode, it compresses at about 250 MB/sec or more and decompresses at about 500 MB/sec or more. (These numbers are for the slowest inputs in our benchmark suite; others are much faster.) In our tests, Snappy usually -is faster than algorithms in the same class (e.g. LZO, LZF, FastLZ, QuickLZ, +is faster than algorithms in the same class (e.g. LZO, LZF, QuickLZ, etc.) while achieving comparable compression ratios. 
Typical compression ratios (based on the benchmark suite) are about 1.5-1.7x @@ -51,8 +51,8 @@ In particular: - Snappy uses 64-bit operations in several places to process more data at once than would otherwise be possible. - - Snappy assumes unaligned 32- and 64-bit loads and stores are cheap. - On some platforms, these must be emulated with single-byte loads + - Snappy assumes unaligned 32 and 64-bit loads and stores are cheap. + On some platforms, these must be emulated with single-byte loads and stores, which is much slower. - Snappy assumes little-endian throughout, and needs to byte-swap data in several places if running on a big-endian platform. @@ -62,25 +62,40 @@ Performance optimizations, whether for 64-bit x86 or other platforms, are of course most welcome; see "Contact", below. +Building +======== + +You need the CMake version specified in [CMakeLists.txt](./CMakeLists.txt) +or later to build: + +```bash +mkdir build +cd build && cmake ../ && make +``` + Usage ===== Note that Snappy, both the implementation and the main interface, is written in C++. However, several third-party bindings to other languages -are available; see the home page at http://google.github.io/snappy/ -for more information. Also, if you want to use Snappy from C code, you can -use the included C bindings in snappy-c.h. +are available; see the [home page](docs/README.md) for more information. +Also, if you want to use Snappy from C code, you can use the included C +bindings in snappy-c.h. To use Snappy from your own C++ program, include the file "snappy.h" from your calling file, and link against the compiled library. 
There are many ways to call Snappy, but the simplest possible is - snappy::Compress(input.data(), input.size(), &output); +```c++ +snappy::Compress(input.data(), input.size(), &output); +``` and similarly - snappy::Uncompress(input.data(), input.size(), &output); +```c++ +snappy::Uncompress(input.data(), input.size(), &output); +``` where "input" and "output" are both instances of std::string. @@ -102,12 +117,12 @@ tests to verify you have not broken anything. Note that if you have the Google Test library installed, unit test behavior (especially failures) will be significantly more user-friendly. You can find Google Test at - http://github.com/google/googletest + https://github.com/google/googletest You probably also want the gflags library for handling of command-line flags; you can find it at - http://gflags.github.io/gflags/ + https://gflags.github.io/gflags/ In addition to the unit tests, snappy contains microbenchmarks used to tune compression and decompression performance. These are automatically run @@ -116,7 +131,7 @@ before the unit tests, but you can disable them using the flag need to edit the source). Finally, snappy can benchmark Snappy against a few other compression libraries -(zlib, LZO, LZF, FastLZ and QuickLZ), if they were detected at configure time. +(zlib, LZO, LZF, and QuickLZ), if they were detected at configure time. To benchmark using a given file, give the compression algorithm you want to test Snappy against (e.g. --zlib) and then a list of one or more file names on the command line. The testdata/ directory contains the files used by the @@ -130,10 +145,4 @@ Contact ======= Snappy is distributed through GitHub. For the latest version, a bug tracker, -and other information, see - - http://google.github.io/snappy/ - -or the repository at - - https://github.com/google/snappy +and other information, see https://github.com/google/snappy. 
diff --git a/other-licenses/snappy/src/snappy-internal.h b/other-licenses/snappy/src/snappy-internal.h index c4d1f6dac674..1e1c307fef82 100644 --- a/other-licenses/snappy/src/snappy-internal.h +++ b/other-licenses/snappy/src/snappy-internal.h @@ -36,21 +36,30 @@ namespace snappy { namespace internal { +// Working memory performs a single allocation to hold all scratch space +// required for compression. class WorkingMemory { public: - WorkingMemory() : large_table_(NULL) { } - ~WorkingMemory() { delete[] large_table_; } + explicit WorkingMemory(size_t input_size); + ~WorkingMemory(); // Allocates and clears a hash table using memory in "*this", // stores the number of buckets in "*table_size" and returns a pointer to // the base of the hash table. - uint16* GetHashTable(size_t input_size, int* table_size); + uint16* GetHashTable(size_t fragment_size, int* table_size) const; + char* GetScratchInput() const { return input_; } + char* GetScratchOutput() const { return output_; } private: - uint16 small_table_[1<<10]; // 2KB - uint16* large_table_; // Allocated only when needed + char* mem_; // the allocated memory, never nullptr + size_t size_; // the size of the allocated memory, never 0 + uint16* table_; // the pointer to the hashtable + char* input_; // the pointer to the input scratch buffer + char* output_; // the pointer to the output scratch buffer - DISALLOW_COPY_AND_ASSIGN(WorkingMemory); + // No copying + WorkingMemory(const WorkingMemory&); + void operator=(const WorkingMemory&); }; // Flat array compression that does not emit the "uncompressed length" @@ -70,57 +79,72 @@ char* CompressFragment(const char* input, uint16* table, const int table_size); -// Return the largest n such that +// Find the largest n such that // // s1[0,n-1] == s2[0,n-1] // and n <= (s2_limit - s2). // +// Return make_pair(n, n < 8). // Does not read *s2_limit or beyond. // Does not read *(s1 + (s2_limit - s2)) or beyond. // Requires that s2_limit >= s2. 
// -// Separate implementation for x86_64, for speed. Uses the fact that -// x86_64 is little endian. -#if defined(ARCH_K8) -static inline int FindMatchLength(const char* s1, - const char* s2, - const char* s2_limit) { +// Separate implementation for 64-bit, little-endian cpus. +#if !defined(SNAPPY_IS_BIG_ENDIAN) && \ + (defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)) +static inline std::pair FindMatchLength(const char* s1, + const char* s2, + const char* s2_limit) { assert(s2_limit >= s2); - int matched = 0; + size_t matched = 0; + + // This block isn't necessary for correctness; we could just start looping + // immediately. As an optimization though, it is useful. It creates some not + // uncommon code paths that determine, without extra effort, whether the match + // length is less than 8. In short, we are hoping to avoid a conditional + // branch, and perhaps get better code layout from the C++ compiler. + if (SNAPPY_PREDICT_TRUE(s2 <= s2_limit - 8)) { + uint64 a1 = UNALIGNED_LOAD64(s1); + uint64 a2 = UNALIGNED_LOAD64(s2); + if (a1 != a2) { + return std::pair(Bits::FindLSBSetNonZero64(a1 ^ a2) >> 3, + true); + } else { + matched = 8; + s2 += 8; + } + } // Find out how long the match is. We loop over the data 64 bits at a // time until we find a 64-bit block that doesn't match; then we find // the first non-matching bit and use that to calculate the total // length of the match. - while (PREDICT_TRUE(s2 <= s2_limit - 8)) { + while (SNAPPY_PREDICT_TRUE(s2 <= s2_limit - 8)) { if (UNALIGNED_LOAD64(s2) == UNALIGNED_LOAD64(s1 + matched)) { s2 += 8; matched += 8; } else { - // On current (mid-2008) Opteron models there is a 3% more - // efficient code sequence to find the first non-matching byte. - // However, what follows is ~10% better on Intel Core 2 and newer, - // and we expect AMD's bsf instruction to improve. 
uint64 x = UNALIGNED_LOAD64(s2) ^ UNALIGNED_LOAD64(s1 + matched); int matching_bits = Bits::FindLSBSetNonZero64(x); matched += matching_bits >> 3; - return matched; + assert(matched >= 8); + return std::pair(matched, false); } } - while (PREDICT_TRUE(s2 < s2_limit)) { + while (SNAPPY_PREDICT_TRUE(s2 < s2_limit)) { if (s1[matched] == *s2) { ++s2; ++matched; } else { - return matched; + return std::pair(matched, matched < 8); } } - return matched; + return std::pair(matched, matched < 8); } #else -static inline int FindMatchLength(const char* s1, - const char* s2, - const char* s2_limit) { +static inline std::pair FindMatchLength(const char* s1, + const char* s2, + const char* s2_limit) { // Implementation based on the x86-64 version, above. assert(s2_limit >= s2); int matched = 0; @@ -140,7 +164,7 @@ static inline int FindMatchLength(const char* s1, ++matched; } } - return matched; + return std::pair(matched, matched < 8); } #endif @@ -155,11 +179,6 @@ enum { }; static const int kMaximumTagLength = 5; // COPY_4_BYTE_OFFSET plus the actual offset. 
-// Mapping from i in range [0,4] to a mask to extract the bottom 8*i bits -static const uint32 wordmask[] = { - 0u, 0xffu, 0xffffu, 0xffffffu, 0xffffffffu -}; - // Data stored per entry in lookup table: // Range Bits-used Description // ------------------------------------ diff --git a/other-licenses/snappy/src/snappy-stubs-internal.cc b/other-licenses/snappy/src/snappy-stubs-internal.cc index 6ed334371f15..66ed2e90393a 100644 --- a/other-licenses/snappy/src/snappy-stubs-internal.cc +++ b/other-licenses/snappy/src/snappy-stubs-internal.cc @@ -33,7 +33,7 @@ namespace snappy { -void Varint::Append32(string* s, uint32 value) { +void Varint::Append32(std::string* s, uint32 value) { char buf[Varint::kMax32]; const char* p = Varint::Encode32(buf, value); s->append(buf, p - buf); diff --git a/other-licenses/snappy/src/snappy-stubs-internal.h b/other-licenses/snappy/src/snappy-stubs-internal.h index 1954c63da913..4854689d1771 100644 --- a/other-licenses/snappy/src/snappy-stubs-internal.h +++ b/other-licenses/snappy/src/snappy-stubs-internal.h @@ -45,6 +45,26 @@ #include #endif +#ifdef HAVE_UNISTD_H +#include +#endif + +#if defined(_MSC_VER) +#include +#endif // defined(_MSC_VER) + +#ifndef __has_feature +#define __has_feature(x) 0 +#endif + +#if __has_feature(memory_sanitizer) +#include +#define SNAPPY_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \ + __msan_unpoison((address), (size)) +#else +#define SNAPPY_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) /* empty */ +#endif // __has_feature(memory_sanitizer) + #include "snappy-stubs-public.h" #if defined(__x86_64__) @@ -52,6 +72,14 @@ // Enable 64-bit optimized versions of some routines. #define ARCH_K8 1 +#elif defined(__ppc64__) + +#define ARCH_PPC 1 + +#elif defined(__aarch64__) + +#define ARCH_ARM 1 + #endif // Needed by OS X, among others. @@ -59,10 +87,6 @@ #define MAP_ANONYMOUS MAP_ANON #endif -// Pull in std::min, std::ostream, and the likes. 
This is safe because this -// header file is never used from any public header files. -using namespace std; - // The size of an array, if known at compile-time. // Will give unexpected results if used on a pointer. // We undefine it first, since some compilers already have a definition. @@ -73,11 +97,11 @@ using namespace std; // Static prediction hints. #ifdef HAVE_BUILTIN_EXPECT -#define PREDICT_FALSE(x) (__builtin_expect(x, 0)) -#define PREDICT_TRUE(x) (__builtin_expect(!!(x), 1)) +#define SNAPPY_PREDICT_FALSE(x) (__builtin_expect(x, 0)) +#define SNAPPY_PREDICT_TRUE(x) (__builtin_expect(!!(x), 1)) #else -#define PREDICT_FALSE(x) x -#define PREDICT_TRUE(x) x +#define SNAPPY_PREDICT_FALSE(x) x +#define SNAPPY_PREDICT_TRUE(x) x #endif // This is only used for recomputing the tag byte table used during @@ -96,9 +120,10 @@ static const int64 kint64max = static_cast(0x7FFFFFFFFFFFFFFFLL); // Potentially unaligned loads and stores. -// x86 and PowerPC can simply do these loads and stores native. +// x86, PowerPC, and ARM64 can simply do these loads and stores native. -#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) +#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \ + defined(__aarch64__) #define UNALIGNED_LOAD16(_p) (*reinterpret_cast(_p)) #define UNALIGNED_LOAD32(_p) (*reinterpret_cast(_p)) @@ -174,7 +199,7 @@ struct Unaligned32Struct { ((reinterpret_cast< ::snappy::base::internal::Unaligned32Struct *>(_p))->value = \ (_val)) -// TODO(user): NEON supports unaligned 64-bit loads and stores. +// TODO: NEON supports unaligned 64-bit loads and stores. // See if that would be more efficient on platforms supporting it, // at least for copies. @@ -225,22 +250,8 @@ inline void UNALIGNED_STORE64(void *p, uint64 v) { #endif -// This can be more efficient than UNALIGNED_LOAD64 + UNALIGNED_STORE64 -// on some platforms, in particular ARM. 
-inline void UnalignedCopy64(const void *src, void *dst) { - if (sizeof(void *) == 8) { - UNALIGNED_STORE64(dst, UNALIGNED_LOAD64(src)); - } else { - const char *src_char = reinterpret_cast(src); - char *dst_char = reinterpret_cast(dst); - - UNALIGNED_STORE32(dst_char, UNALIGNED_LOAD32(src_char)); - UNALIGNED_STORE32(dst_char + 4, UNALIGNED_LOAD32(src_char + 4)); - } -} - // The following guarantees declaration of the byte swap functions. -#ifdef WORDS_BIGENDIAN +#if defined(SNAPPY_IS_BIG_ENDIAN) #ifdef HAVE_SYS_BYTEORDER_H #include @@ -297,7 +308,7 @@ inline uint64 bswap_64(uint64 x) { #endif -#endif // WORDS_BIGENDIAN +#endif // defined(SNAPPY_IS_BIG_ENDIAN) // Convert to little-endian storage, opposite of network format. // Convert x from host to little endian: x = LittleEndian.FromHost(x); @@ -311,7 +322,7 @@ inline uint64 bswap_64(uint64 x) { class LittleEndian { public: // Conversion functions. -#ifdef WORDS_BIGENDIAN +#if defined(SNAPPY_IS_BIG_ENDIAN) static uint16 FromHost16(uint16 x) { return bswap_16(x); } static uint16 ToHost16(uint16 x) { return bswap_16(x); } @@ -321,7 +332,7 @@ class LittleEndian { static bool IsLittleEndian() { return false; } -#else // !defined(WORDS_BIGENDIAN) +#else // !defined(SNAPPY_IS_BIG_ENDIAN) static uint16 FromHost16(uint16 x) { return x; } static uint16 ToHost16(uint16 x) { return x; } @@ -331,7 +342,7 @@ class LittleEndian { static bool IsLittleEndian() { return true; } -#endif // !defined(WORDS_BIGENDIAN) +#endif // !defined(SNAPPY_IS_BIG_ENDIAN) // Functions to do unaligned loads and stores in little-endian order. static uint16 Load16(const void *p) { @@ -354,6 +365,9 @@ class LittleEndian { // Some bit-manipulation functions. class Bits { public: + // Return floor(log2(n)) for positive integer n. + static int Log2FloorNonZero(uint32 n); + // Return floor(log2(n)) for positive integer n. Returns -1 iff n == 0. static int Log2Floor(uint32 n); @@ -361,31 +375,85 @@ class Bits { // undefined value if n == 0. 
FindLSBSetNonZero() is similar to ffs() except // that it's 0-indexed. static int FindLSBSetNonZero(uint32 n); + +#if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM) static int FindLSBSetNonZero64(uint64 n); +#endif // defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM) private: - DISALLOW_COPY_AND_ASSIGN(Bits); + // No copying + Bits(const Bits&); + void operator=(const Bits&); }; #ifdef HAVE_BUILTIN_CTZ +inline int Bits::Log2FloorNonZero(uint32 n) { + assert(n != 0); + // (31 ^ x) is equivalent to (31 - x) for x in [0, 31]. An easy proof + // represents subtraction in base 2 and observes that there's no carry. + // + // GCC and Clang represent __builtin_clz on x86 as 31 ^ _bit_scan_reverse(x). + // Using "31 ^" here instead of "31 -" allows the optimizer to strip the + // function body down to _bit_scan_reverse(x). + return 31 ^ __builtin_clz(n); +} + inline int Bits::Log2Floor(uint32 n) { - return n == 0 ? -1 : 31 ^ __builtin_clz(n); + return (n == 0) ? -1 : Bits::Log2FloorNonZero(n); } inline int Bits::FindLSBSetNonZero(uint32 n) { + assert(n != 0); return __builtin_ctz(n); } +#if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM) inline int Bits::FindLSBSetNonZero64(uint64 n) { + assert(n != 0); return __builtin_ctzll(n); } +#endif // defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM) + +#elif defined(_MSC_VER) + +inline int Bits::Log2FloorNonZero(uint32 n) { + assert(n != 0); + unsigned long where; + _BitScanReverse(&where, n); + return static_cast(where); +} + +inline int Bits::Log2Floor(uint32 n) { + unsigned long where; + if (_BitScanReverse(&where, n)) + return static_cast(where); + return -1; +} + +inline int Bits::FindLSBSetNonZero(uint32 n) { + assert(n != 0); + unsigned long where; + if (_BitScanForward(&where, n)) + return static_cast(where); + return 32; +} + +#if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM) +inline int Bits::FindLSBSetNonZero64(uint64 n) { + assert(n != 0); + unsigned long where; + if 
(_BitScanForward64(&where, n)) + return static_cast(where); + return 64; +} +#endif // defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM) #else // Portable versions. -inline int Bits::Log2Floor(uint32 n) { - if (n == 0) - return -1; +inline int Bits::Log2FloorNonZero(uint32 n) { + assert(n != 0); + int log = 0; uint32 value = n; for (int i = 4; i >= 0; --i) { @@ -400,7 +468,13 @@ inline int Bits::Log2Floor(uint32 n) { return log; } +inline int Bits::Log2Floor(uint32 n) { + return (n == 0) ? -1 : Bits::Log2FloorNonZero(n); +} + inline int Bits::FindLSBSetNonZero(uint32 n) { + assert(n != 0); + int rc = 31; for (int i = 4, shift = 1 << 4; i >= 0; --i) { const uint32 x = n << shift; @@ -413,8 +487,11 @@ inline int Bits::FindLSBSetNonZero(uint32 n) { return rc; } +#if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM) // FindLSBSetNonZero64() is defined in terms of FindLSBSetNonZero(). inline int Bits::FindLSBSetNonZero64(uint64 n) { + assert(n != 0); + const uint32 bottombits = static_cast(n); if (bottombits == 0) { // Bottom bits are zero, so scan in top bits @@ -423,6 +500,7 @@ inline int Bits::FindLSBSetNonZero64(uint64 n) { return FindLSBSetNonZero(bottombits); } } +#endif // defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM) #endif // End portable versions. @@ -446,7 +524,7 @@ class Varint { static char* Encode32(char* ptr, uint32 v); // EFFECTS Appends the varint representation of "value" to "*s". - static void Append32(string* s, uint32 value); + static void Append32(std::string* s, uint32 value); }; inline const char* Varint::Parse32WithLimit(const char* p, @@ -503,7 +581,7 @@ inline char* Varint::Encode32(char* sptr, uint32 v) { // replace this function with one that resizes the string without // filling the new space with zeros (if applicable) -- // it will be non-portable but faster. 
-inline void STLStringResizeUninitialized(string* s, size_t new_size) { +inline void STLStringResizeUninitialized(std::string* s, size_t new_size) { s->resize(new_size); } @@ -519,7 +597,7 @@ inline void STLStringResizeUninitialized(string* s, size_t new_size) { // (http://www.open-std.org/JTC1/SC22/WG21/docs/lwg-defects.html#530) // proposes this as the method. It will officially be part of the standard // for C++0x. This should already work on all current implementations. -inline char* string_as_array(string* str) { +inline char* string_as_array(std::string* str) { return str->empty() ? NULL : &*str->begin(); } diff --git a/other-licenses/snappy/src/snappy-stubs-public.h.in b/other-licenses/snappy/src/snappy-stubs-public.h.in index 96989ac390cc..416ab9978c2b 100644 --- a/other-licenses/snappy/src/snappy-stubs-public.h.in +++ b/other-licenses/snappy/src/snappy-stubs-public.h.in @@ -1,5 +1,4 @@ // Copyright 2011 Google Inc. All Rights Reserved. -// Author: sesse@google.com (Steinar H. Gunderson) // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are @@ -36,64 +35,39 @@ #ifndef THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_ #define THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_ -#if @ac_cv_have_stdint_h@ -#include -#endif +#include +#include +#include -#if @ac_cv_have_stddef_h@ -#include -#endif - -#if @ac_cv_have_sys_uio_h@ +#if ${HAVE_SYS_UIO_H_01} // HAVE_SYS_UIO_H #include -#endif +#endif // HAVE_SYS_UIO_H -#define SNAPPY_MAJOR @SNAPPY_MAJOR@ -#define SNAPPY_MINOR @SNAPPY_MINOR@ -#define SNAPPY_PATCHLEVEL @SNAPPY_PATCHLEVEL@ +#define SNAPPY_MAJOR ${PROJECT_VERSION_MAJOR} +#define SNAPPY_MINOR ${PROJECT_VERSION_MINOR} +#define SNAPPY_PATCHLEVEL ${PROJECT_VERSION_PATCH} #define SNAPPY_VERSION \ ((SNAPPY_MAJOR << 16) | (SNAPPY_MINOR << 8) | SNAPPY_PATCHLEVEL) -#include - namespace snappy { -#if @ac_cv_have_stdint_h@ -typedef int8_t int8; -typedef uint8_t uint8; 
-typedef int16_t int16; -typedef uint16_t uint16; -typedef int32_t int32; -typedef uint32_t uint32; -typedef int64_t int64; -typedef uint64_t uint64; -#else -typedef signed char int8; -typedef unsigned char uint8; -typedef short int16; -typedef unsigned short uint16; -typedef int int32; -typedef unsigned int uint32; -typedef long long int64; -typedef unsigned long long uint64; -#endif +using int8 = std::int8_t; +using uint8 = std::uint8_t; +using int16 = std::int16_t; +using uint16 = std::uint16_t; +using int32 = std::int32_t; +using uint32 = std::uint32_t; +using int64 = std::int64_t; +using uint64 = std::uint64_t; -typedef std::string string; - -#ifndef DISALLOW_COPY_AND_ASSIGN -#define DISALLOW_COPY_AND_ASSIGN(TypeName) \ - TypeName(const TypeName&); \ - void operator=(const TypeName&) -#endif - -#if !@ac_cv_have_sys_uio_h@ +#if !${HAVE_SYS_UIO_H_01} // !HAVE_SYS_UIO_H // Windows does not have an iovec type, yet the concept is universally useful. // It is simple to define it ourselves, so we put it inside our own namespace. struct iovec { - void* iov_base; - size_t iov_len; + void* iov_base; + size_t iov_len; }; -#endif +#endif // !HAVE_SYS_UIO_H } // namespace snappy diff --git a/other-licenses/snappy/src/snappy-test.cc b/other-licenses/snappy/src/snappy-test.cc index 7f1d0a8d1a7f..83be2d36311e 100644 --- a/other-licenses/snappy/src/snappy-test.cc +++ b/other-licenses/snappy/src/snappy-test.cc @@ -33,6 +33,9 @@ #endif #ifdef HAVE_WINDOWS_H +// Needed to be able to use std::max without workarounds in the source code. 
+// https://support.microsoft.com/en-us/help/143208/prb-using-stl-in-windows-program-can-cause-min-max-conflicts +#define NOMINMAX #include #endif @@ -45,12 +48,12 @@ DEFINE_bool(run_microbenchmarks, true, namespace snappy { -string ReadTestDataFile(const string& base, size_t size_limit) { - string contents; +std::string ReadTestDataFile(const std::string& base, size_t size_limit) { + std::string contents; const char* srcdir = getenv("srcdir"); // This is set by Automake. - string prefix; + std::string prefix; if (srcdir) { - prefix = string(srcdir) + "/"; + prefix = std::string(srcdir) + "/"; } file::GetContents(prefix + "testdata/" + base, &contents, file::Defaults() ).CheckSuccess(); @@ -60,11 +63,11 @@ string ReadTestDataFile(const string& base, size_t size_limit) { return contents; } -string ReadTestDataFile(const string& base) { +std::string ReadTestDataFile(const std::string& base) { return ReadTestDataFile(base, 0); } -string StringPrintf(const char* format, ...) { +std::string StrFormat(const char* format, ...) { char buf[4096]; va_list ap; va_start(ap, format); @@ -76,7 +79,7 @@ string StringPrintf(const char* format, ...) 
{ bool benchmark_running = false; int64 benchmark_real_time_us = 0; int64 benchmark_cpu_time_us = 0; -string *benchmark_label = NULL; +std::string* benchmark_label = nullptr; int64 benchmark_bytes_processed = 0; void ResetBenchmarkTiming() { @@ -160,11 +163,11 @@ void StopBenchmarkTiming() { benchmark_running = false; } -void SetBenchmarkLabel(const string& str) { +void SetBenchmarkLabel(const std::string& str) { if (benchmark_label) { delete benchmark_label; } - benchmark_label = new string(str); + benchmark_label = new std::string(str); } void SetBenchmarkBytesProcessed(int64 bytes) { @@ -201,7 +204,7 @@ void Benchmark::Run() { if (benchmark_real_time_us > 0) { num_iterations = 200000 * kCalibrateIterations / benchmark_real_time_us; } - num_iterations = max(num_iterations, kCalibrateIterations); + num_iterations = std::max(num_iterations, kCalibrateIterations); BenchmarkRun benchmark_runs[kNumRuns]; for (int run = 0; run < kNumRuns; ++run) { @@ -214,13 +217,13 @@ void Benchmark::Run() { benchmark_runs[run].cpu_time_us = benchmark_cpu_time_us; } - string heading = StringPrintf("%s/%d", name_.c_str(), test_case_num); - string human_readable_speed; + std::string heading = StrFormat("%s/%d", name_.c_str(), test_case_num); + std::string human_readable_speed; - nth_element(benchmark_runs, - benchmark_runs + kMedianPos, - benchmark_runs + kNumRuns, - BenchmarkCompareCPUTime()); + std::nth_element(benchmark_runs, + benchmark_runs + kMedianPos, + benchmark_runs + kNumRuns, + BenchmarkCompareCPUTime()); int64 real_time_us = benchmark_runs[kMedianPos].real_time_us; int64 cpu_time_us = benchmark_runs[kMedianPos].cpu_time_us; if (cpu_time_us <= 0) { @@ -229,15 +232,16 @@ void Benchmark::Run() { int64 bytes_per_second = benchmark_bytes_processed * 1000000 / cpu_time_us; if (bytes_per_second < 1024) { - human_readable_speed = StringPrintf("%dB/s", bytes_per_second); + human_readable_speed = + StrFormat("%dB/s", static_cast(bytes_per_second)); } else if (bytes_per_second < 1024 
* 1024) { - human_readable_speed = StringPrintf( + human_readable_speed = StrFormat( "%.1fkB/s", bytes_per_second / 1024.0f); } else if (bytes_per_second < 1024 * 1024 * 1024) { - human_readable_speed = StringPrintf( + human_readable_speed = StrFormat( "%.1fMB/s", bytes_per_second / (1024.0f * 1024.0f)); } else { - human_readable_speed = StringPrintf( + human_readable_speed = StrFormat( "%.1fGB/s", bytes_per_second / (1024.0f * 1024.0f * 1024.0f)); } } @@ -523,8 +527,8 @@ int ZLib::UncompressAtMostOrAll(Bytef *dest, uLongf *destLen, LOG(WARNING) << "UncompressChunkOrAll: Received some extra data, bytes total: " << uncomp_stream_.avail_in << " bytes: " - << string(reinterpret_cast(uncomp_stream_.next_in), - min(int(uncomp_stream_.avail_in), 20)); + << std::string(reinterpret_cast(uncomp_stream_.next_in), + std::min(int(uncomp_stream_.avail_in), 20)); UncompressErrorInit(); return Z_DATA_ERROR; // what's the extra data for? } else if (err != Z_OK && err != Z_STREAM_END && err != Z_BUF_ERROR) { diff --git a/other-licenses/snappy/src/snappy-test.h b/other-licenses/snappy/src/snappy-test.h index 5fb09c78cd88..c8b7d388607e 100644 --- a/other-licenses/snappy/src/snappy-test.h +++ b/other-licenses/snappy/src/snappy-test.h @@ -55,8 +55,6 @@ #include #endif -#include - #ifdef HAVE_GTEST #include @@ -110,26 +108,8 @@ #include "lzo/lzo1x.h" #endif -#ifdef HAVE_LIBLZF -extern "C" { -#include "lzf.h" -} -#endif - -#ifdef HAVE_LIBFASTLZ -#include "fastlz.h" -#endif - -#ifdef HAVE_LIBQUICKLZ -#include "quicklz.h" -#endif - namespace { -namespace File { - void Init() { } -} // namespace File - namespace file { int Defaults() { return 0; } @@ -138,7 +118,8 @@ namespace file { void CheckSuccess() { } }; - DummyStatus GetContents(const string& filename, string* data, int unused) { + DummyStatus GetContents( + const std::string& filename, std::string* data, int unused) { FILE* fp = fopen(filename.c_str(), "rb"); if (fp == NULL) { perror(filename.c_str()); @@ -153,7 +134,7 @@ namespace 
file { perror("fread"); exit(1); } - data->append(string(buf, ret)); + data->append(std::string(buf, ret)); } fclose(fp); @@ -161,9 +142,8 @@ namespace file { return DummyStatus(); } - DummyStatus SetContents(const string& filename, - const string& str, - int unused) { + inline DummyStatus SetContents( + const std::string& filename, const std::string& str, int unused) { FILE* fp = fopen(filename.c_str(), "wb"); if (fp == NULL) { perror(filename.c_str()); @@ -187,7 +167,7 @@ namespace file { namespace snappy { #define FLAGS_test_random_seed 301 -typedef string TypeParam; +using TypeParam = std::string; void Test_CorruptedTest_VerifyCorrupted(); void Test_Snappy_SimpleTests(); @@ -201,63 +181,13 @@ void Test_Snappy_ReadPastEndOfBuffer(); void Test_Snappy_FindMatchLength(); void Test_Snappy_FindMatchLengthRandom(); -string ReadTestDataFile(const string& base, size_t size_limit); +std::string ReadTestDataFile(const std::string& base, size_t size_limit); -string ReadTestDataFile(const string& base); +std::string ReadTestDataFile(const std::string& base); // A sprintf() variant that returns a std::string. // Not safe for general use due to truncation issues. -string StringPrintf(const char* format, ...); - -// A simple, non-cryptographically-secure random generator. -class ACMRandom { - public: - explicit ACMRandom(uint32 seed) : seed_(seed) {} - - int32 Next(); - - int32 Uniform(int32 n) { - return Next() % n; - } - uint8 Rand8() { - return static_cast((Next() >> 1) & 0x000000ff); - } - bool OneIn(int X) { return Uniform(X) == 0; } - - // Skewed: pick "base" uniformly from range [0,max_log] and then - // return "base" random bits. The effect is to pick a number in the - // range [0,2^max_log-1] with bias towards smaller numbers. 
- int32 Skewed(int max_log); - - private: - static const uint32 M = 2147483647L; // 2^31-1 - uint32 seed_; -}; - -inline int32 ACMRandom::Next() { - static const uint64 A = 16807; // bits 14, 8, 7, 5, 2, 1, 0 - // We are computing - // seed_ = (seed_ * A) % M, where M = 2^31-1 - // - // seed_ must not be zero or M, or else all subsequent computed values - // will be zero or M respectively. For all other values, seed_ will end - // up cycling through every number in [1,M-1] - uint64 product = seed_ * A; - - // Compute (product % M) using the fact that ((x << 31) % M) == x. - seed_ = (product >> 31) + (product & M); - // The first reduction may overflow by 1 bit, so we may need to repeat. - // mod == M is not possible; using > allows the faster sign-bit-based test. - if (seed_ > M) { - seed_ -= M; - } - return seed_; -} - -inline int32 ACMRandom::Skewed(int max_log) { - const int32 base = (Next() - 1) % (max_log+1); - return (Next() - 1) & ((1u << base)-1); -} +std::string StrFormat(const char* format, ...); // A wall-time clock. This stub is not super-accurate, nor resistant to the // system time changing. 
@@ -311,8 +241,8 @@ typedef void (*BenchmarkFunction)(int, int); class Benchmark { public: - Benchmark(const string& name, BenchmarkFunction function) : - name_(name), function_(function) {} + Benchmark(const std::string& name, BenchmarkFunction function) + : name_(name), function_(function) {} Benchmark* DenseRange(int start, int stop) { start_ = start; @@ -323,7 +253,7 @@ class Benchmark { void Run(); private: - const string name_; + const std::string name_; const BenchmarkFunction function_; int start_, stop_; }; @@ -335,11 +265,13 @@ extern Benchmark* Benchmark_BM_UFlat; extern Benchmark* Benchmark_BM_UIOVec; extern Benchmark* Benchmark_BM_UValidate; extern Benchmark* Benchmark_BM_ZFlat; +extern Benchmark* Benchmark_BM_ZFlatAll; +extern Benchmark* Benchmark_BM_ZFlatIncreasingTableSize; void ResetBenchmarkTiming(); void StartBenchmarkTiming(); void StopBenchmarkTiming(); -void SetBenchmarkLabel(const string& str); +void SetBenchmarkLabel(const std::string& str); void SetBenchmarkBytesProcessed(int64 bytes); #ifdef HAVE_LIBZ @@ -467,7 +399,7 @@ class ZLib { DECLARE_bool(run_microbenchmarks); -static void RunSpecifiedBenchmarks() { +static inline void RunSpecifiedBenchmarks() { if (!FLAGS_run_microbenchmarks) { return; } @@ -486,6 +418,8 @@ static void RunSpecifiedBenchmarks() { snappy::Benchmark_BM_UIOVec->Run(); snappy::Benchmark_BM_UValidate->Run(); snappy::Benchmark_BM_ZFlat->Run(); + snappy::Benchmark_BM_ZFlatAll->Run(); + snappy::Benchmark_BM_ZFlatIncreasingTableSize->Run(); fprintf(stderr, "\n"); } @@ -515,10 +449,6 @@ static inline int RUN_ALL_TESTS() { // For main(). namespace snappy { -static void CompressFile(const char* fname); -static void UncompressFile(const char* fname); -static void MeasureFile(const char* fname); - // Logging. 
#define LOG(level) LogMessage() @@ -529,15 +459,15 @@ class LogMessage { public: LogMessage() { } ~LogMessage() { - cerr << endl; + std::cerr << std::endl; } LogMessage& operator<<(const std::string& msg) { - cerr << msg; + std::cerr << msg; return *this; } LogMessage& operator<<(int x) { - cerr << x; + std::cerr << x; return *this; } }; @@ -546,7 +476,7 @@ class LogMessage { // and ones that are always active. #define CRASH_UNLESS(condition) \ - PREDICT_TRUE(condition) ? (void)0 : \ + SNAPPY_PREDICT_TRUE(condition) ? (void)0 : \ snappy::LogMessageVoidify() & snappy::LogMessageCrash() #ifdef _MSC_VER @@ -560,7 +490,7 @@ class LogMessageCrash : public LogMessage { public: LogMessageCrash() { } ~LogMessageCrash() { - cerr << endl; + std::cerr << std::endl; abort(); } }; @@ -590,10 +520,6 @@ class LogMessageVoidify { #define CHECK_GT(a, b) CRASH_UNLESS((a) > (b)) #define CHECK_OK(cond) (cond).CheckSuccess() -} // namespace - -using snappy::CompressFile; -using snappy::UncompressFile; -using snappy::MeasureFile; +} // namespace snappy #endif // THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_TEST_H_ diff --git a/other-licenses/snappy/src/snappy.cc b/other-licenses/snappy/src/snappy.cc index 8a3668c693c1..ce1eef463069 100644 --- a/other-licenses/snappy/src/snappy.cc +++ b/other-licenses/snappy/src/snappy.cc @@ -30,22 +30,57 @@ #include "snappy-internal.h" #include "snappy-sinksource.h" +#if !defined(SNAPPY_HAVE_SSSE3) +// __SSSE3__ is defined by GCC and Clang. Visual Studio doesn't target SIMD +// support between SSE2 and AVX (so SSSE3 instructions require AVX support), and +// defines __AVX__ when AVX support is available. +#if defined(__SSSE3__) || defined(__AVX__) +#define SNAPPY_HAVE_SSSE3 1 +#else +#define SNAPPY_HAVE_SSSE3 0 +#endif +#endif // !defined(SNAPPY_HAVE_SSSE3) + +#if !defined(SNAPPY_HAVE_BMI2) +// __BMI2__ is defined by GCC and Clang. Visual Studio doesn't target BMI2 +// specifically, but it does define __AVX2__ when AVX2 support is available. 
+// Fortunately, AVX2 was introduced in Haswell, just like BMI2. +// +// BMI2 is not defined as a subset of AVX2 (unlike SSSE3 and AVX above). So, +// GCC and Clang can build code with AVX2 enabled but BMI2 disabled, in which +// case issuing BMI2 instructions results in a compiler error. +#if defined(__BMI2__) || (defined(_MSC_VER) && defined(__AVX2__)) +#define SNAPPY_HAVE_BMI2 1 +#else +#define SNAPPY_HAVE_BMI2 0 +#endif +#endif // !defined(SNAPPY_HAVE_BMI2) + +#if SNAPPY_HAVE_SSSE3 +// Please do not replace with . or with headers that assume more +// advanced SSE versions without checking with all the OWNERS. +#include +#endif + +#if SNAPPY_HAVE_BMI2 +// Please do not replace with . or with headers that assume more +// advanced SSE versions without checking with all the OWNERS. +#include +#endif + #include #include #include #include - namespace snappy { using internal::COPY_1_BYTE_OFFSET; using internal::COPY_2_BYTE_OFFSET; -using internal::COPY_4_BYTE_OFFSET; using internal::LITERAL; using internal::char_table; using internal::kMaximumTagLength; -using internal::wordmask; // Any hash function will produce a valid compressed bitstream, but a good // hash function reduces the number of collisions and thus yields better @@ -84,154 +119,314 @@ size_t MaxCompressedLength(size_t source_len) { return 32 + source_len + source_len/6; } -// Copy "len" bytes from "src" to "op", one byte at a time. Used for -// handling COPY operations where the input and output regions may -// overlap. For example, suppose: -// src == "ab" -// op == src + 2 -// len == 20 -// After IncrementalCopy(src, op, len), the result will have -// eleven copies of "ab" -// ababababababababababab -// Note that this does not match the semantics of either memcpy() -// or memmove(). 
-static inline void IncrementalCopy(const char* src, char* op, ssize_t len) { - assert(len > 0); - do { - *op++ = *src++; - } while (--len > 0); -} - -// Equivalent to IncrementalCopy except that it can write up to ten extra -// bytes after the end of the copy, and that it is faster. -// -// The main part of this loop is a simple copy of eight bytes at a time until -// we've copied (at least) the requested amount of bytes. However, if op and -// src are less than eight bytes apart (indicating a repeating pattern of -// length < 8), we first need to expand the pattern in order to get the correct -// results. For instance, if the buffer looks like this, with the eight-byte -// and patterns marked as intervals: -// -// abxxxxxxxxxxxx -// [------] src -// [------] op -// -// a single eight-byte copy from to will repeat the pattern once, -// after which we can move two bytes without moving : -// -// ababxxxxxxxxxx -// [------] src -// [------] op -// -// and repeat the exercise until the two no longer overlap. -// -// This allows us to do very well in the special case of one single byte -// repeated many times, without taking a big hit for more general cases. -// -// The worst case of extra writing past the end of the match occurs when -// op - src == 1 and len == 1; the last copy will read from byte positions -// [0..7] and write to [4..11], whereas it was only supposed to write to -// position 1. Thus, ten excess bytes. - namespace { -const int kMaxIncrementCopyOverflow = 10; +void UnalignedCopy64(const void* src, void* dst) { + char tmp[8]; + memcpy(tmp, src, 8); + memcpy(dst, tmp, 8); +} -inline void IncrementalCopyFastPath(const char* src, char* op, ssize_t len) { - while (PREDICT_FALSE(op - src < 8)) { - UnalignedCopy64(src, op); - len -= op - src; - op += op - src; +void UnalignedCopy128(const void* src, void* dst) { + // memcpy gets vectorized when the appropriate compiler options are used. 
+ // For example, x86 compilers targeting SSE2+ will optimize to an SSE2 load + // and store. + char tmp[16]; + memcpy(tmp, src, 16); + memcpy(dst, tmp, 16); +} + +// Copy [src, src+(op_limit-op)) to [op, (op_limit-op)) a byte at a time. Used +// for handling COPY operations where the input and output regions may overlap. +// For example, suppose: +// src == "ab" +// op == src + 2 +// op_limit == op + 20 +// After IncrementalCopySlow(src, op, op_limit), the result will have eleven +// copies of "ab" +// ababababababababababab +// Note that this does not match the semantics of either memcpy() or memmove(). +inline char* IncrementalCopySlow(const char* src, char* op, + char* const op_limit) { + // TODO: Remove pragma when LLVM is aware this + // function is only called in cold regions and when cold regions don't get + // vectorized or unrolled. +#ifdef __clang__ +#pragma clang loop unroll(disable) +#endif + while (op < op_limit) { + *op++ = *src++; } - while (len > 0) { + return op_limit; +} + +#if SNAPPY_HAVE_SSSE3 + +// This is a table of shuffle control masks that can be used as the source +// operand for PSHUFB to permute the contents of the destination XMM register +// into a repeating byte pattern. +alignas(16) const char pshufb_fill_patterns[7][16] = { + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1}, + {0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0}, + {0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3}, + {0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0}, + {0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3}, + {0, 1, 2, 3, 4, 5, 6, 0, 1, 2, 3, 4, 5, 6, 0, 1}, +}; + +#endif // SNAPPY_HAVE_SSSE3 + +// Copy [src, src+(op_limit-op)) to [op, (op_limit-op)) but faster than +// IncrementalCopySlow. buf_limit is the address past the end of the writable +// region of the buffer. 
+inline char* IncrementalCopy(const char* src, char* op, char* const op_limit, + char* const buf_limit) { + // Terminology: + // + // slop = buf_limit - op + // pat = op - src + // len = limit - op + assert(src < op); + assert(op <= op_limit); + assert(op_limit <= buf_limit); + // NOTE: The compressor always emits 4 <= len <= 64. It is ok to assume that + // to optimize this function but we have to also handle other cases in case + // the input does not satisfy these conditions. + + size_t pattern_size = op - src; + // The cases are split into different branches to allow the branch predictor, + // FDO, and static prediction hints to work better. For each input we list the + // ratio of invocations that match each condition. + // + // input slop < 16 pat < 8 len > 16 + // ------------------------------------------ + // html|html4|cp 0% 1.01% 27.73% + // urls 0% 0.88% 14.79% + // jpg 0% 64.29% 7.14% + // pdf 0% 2.56% 58.06% + // txt[1-4] 0% 0.23% 0.97% + // pb 0% 0.96% 13.88% + // bin 0.01% 22.27% 41.17% + // + // It is very rare that we don't have enough slop for doing block copies. It + // is also rare that we need to expand a pattern. Small patterns are common + // for incompressible formats and for those we are plenty fast already. + // Lengths are normally not greater than 16 but they vary depending on the + // input. In general if we always predict len <= 16 it would be an ok + // prediction. + // + // In order to be fast we want a pattern >= 8 bytes and an unrolled loop + // copying 2x 8 bytes at a time. + + // Handle the uncommon case where pattern is less than 8 bytes. + if (SNAPPY_PREDICT_FALSE(pattern_size < 8)) { +#if SNAPPY_HAVE_SSSE3 + // Load the first eight bytes into an 128-bit XMM register, then use PSHUFB + // to permute the register's contents in-place into a repeating sequence of + // the first "pattern_size" bytes. 
+ // For example, suppose: + // src == "abc" + // op == op + 3 + // After _mm_shuffle_epi8(), "pattern" will have five copies of "abc" + // followed by one byte of slop: abcabcabcabcabca. + // + // The non-SSE fallback implementation suffers from store-forwarding stalls + // because its loads and stores partly overlap. By expanding the pattern + // in-place, we avoid the penalty. + if (SNAPPY_PREDICT_TRUE(op <= buf_limit - 16)) { + const __m128i shuffle_mask = _mm_load_si128( + reinterpret_cast(pshufb_fill_patterns) + + pattern_size - 1); + const __m128i pattern = _mm_shuffle_epi8( + _mm_loadl_epi64(reinterpret_cast(src)), shuffle_mask); + // Uninitialized bytes are masked out by the shuffle mask. + // TODO: remove annotation and macro defs once MSan is fixed. + SNAPPY_ANNOTATE_MEMORY_IS_INITIALIZED(&pattern, sizeof(pattern)); + pattern_size *= 16 / pattern_size; + char* op_end = std::min(op_limit, buf_limit - 15); + while (op < op_end) { + _mm_storeu_si128(reinterpret_cast<__m128i*>(op), pattern); + op += pattern_size; + } + if (SNAPPY_PREDICT_TRUE(op >= op_limit)) return op_limit; + } + return IncrementalCopySlow(src, op, op_limit); +#else // !SNAPPY_HAVE_SSSE3 + // If plenty of buffer space remains, expand the pattern to at least 8 + // bytes. The way the following loop is written, we need 8 bytes of buffer + // space if pattern_size >= 4, 11 bytes if pattern_size is 1 or 3, and 10 + // bytes if pattern_size is 2. Precisely encoding that is probably not + // worthwhile; instead, invoke the slow path if we cannot write 11 bytes + // (because 11 are required in the worst case). + if (SNAPPY_PREDICT_TRUE(op <= buf_limit - 11)) { + while (pattern_size < 8) { + UnalignedCopy64(src, op); + op += pattern_size; + pattern_size *= 2; + } + if (SNAPPY_PREDICT_TRUE(op >= op_limit)) return op_limit; + } else { + return IncrementalCopySlow(src, op, op_limit); + } +#endif // SNAPPY_HAVE_SSSE3 + } + assert(pattern_size >= 8); + + // Copy 2x 8 bytes at a time. 
Because op - src can be < 16, a single + // UnalignedCopy128 might overwrite data in op. UnalignedCopy64 is safe + // because expanding the pattern to at least 8 bytes guarantees that + // op - src >= 8. + // + // Typically, the op_limit is the gating factor so try to simplify the loop + // based on that. + if (SNAPPY_PREDICT_TRUE(op_limit <= buf_limit - 16)) { + // There is at least one, and at most four 16-byte blocks. Writing four + // conditionals instead of a loop allows FDO to layout the code with respect + // to the actual probabilities of each length. + // TODO: Replace with loop with trip count hint. + UnalignedCopy64(src, op); + UnalignedCopy64(src + 8, op + 8); + + if (op + 16 < op_limit) { + UnalignedCopy64(src + 16, op + 16); + UnalignedCopy64(src + 24, op + 24); + } + if (op + 32 < op_limit) { + UnalignedCopy64(src + 32, op + 32); + UnalignedCopy64(src + 40, op + 40); + } + if (op + 48 < op_limit) { + UnalignedCopy64(src + 48, op + 48); + UnalignedCopy64(src + 56, op + 56); + } + return op_limit; + } + + // Fall back to doing as much as we can with the available slop in the + // buffer. This code path is relatively cold however so we save code size by + // avoiding unrolling and vectorizing. + // + // TODO: Remove pragma when when cold regions don't get vectorized + // or unrolled. +#ifdef __clang__ +#pragma clang loop unroll(disable) +#endif + for (char *op_end = buf_limit - 16; op < op_end; op += 16, src += 16) { + UnalignedCopy64(src, op); + UnalignedCopy64(src + 8, op + 8); + } + if (op >= op_limit) + return op_limit; + + // We only take this branch if we didn't have enough slop and we can do a + // single 8 byte copy. 
+ if (SNAPPY_PREDICT_FALSE(op <= buf_limit - 8)) { UnalignedCopy64(src, op); src += 8; op += 8; - len -= 8; } + return IncrementalCopySlow(src, op, op_limit); } } // namespace +template static inline char* EmitLiteral(char* op, const char* literal, - int len, - bool allow_fast_path) { - int n = len - 1; // Zero-length literals are disallowed - if (n < 60) { + int len) { + // The vast majority of copies are below 16 bytes, for which a + // call to memcpy is overkill. This fast path can sometimes + // copy up to 15 bytes too much, but that is okay in the + // main loop, since we have a bit to go on for both sides: + // + // - The input will always have kInputMarginBytes = 15 extra + // available bytes, as long as we're in the main loop, and + // if not, allow_fast_path = false. + // - The output will always have 32 spare bytes (see + // MaxCompressedLength). + assert(len > 0); // Zero-length literals are disallowed + int n = len - 1; + if (allow_fast_path && len <= 16) { // Fits in tag byte *op++ = LITERAL | (n << 2); - // The vast majority of copies are below 16 bytes, for which a - // call to memcpy is overkill. This fast path can sometimes - // copy up to 15 bytes too much, but that is okay in the - // main loop, since we have a bit to go on for both sides: - // - // - The input will always have kInputMarginBytes = 15 extra - // available bytes, as long as we're in the main loop, and - // if not, allow_fast_path = false. - // - The output will always have 32 spare bytes (see - // MaxCompressedLength). 
- if (allow_fast_path && len <= 16) { - UnalignedCopy64(literal, op); - UnalignedCopy64(literal + 8, op + 8); - return op + len; - } + UnalignedCopy128(literal, op); + return op + len; + } + + if (n < 60) { + // Fits in tag byte + *op++ = LITERAL | (n << 2); } else { - // Encode in upcoming bytes - char* base = op; - int count = 0; - op++; - while (n > 0) { - *op++ = n & 0xff; - n >>= 8; - count++; - } + int count = (Bits::Log2Floor(n) >> 3) + 1; assert(count >= 1); assert(count <= 4); - *base = LITERAL | ((59+count) << 2); + *op++ = LITERAL | ((59 + count) << 2); + // Encode in upcoming bytes. + // Write 4 bytes, though we may care about only 1 of them. The output buffer + // is guaranteed to have at least 3 more spaces left as 'len >= 61' holds + // here and there is a memcpy of size 'len' below. + LittleEndian::Store32(op, n); + op += count; } memcpy(op, literal, len); return op + len; } -static inline char* EmitCopyLessThan64(char* op, size_t offset, int len) { +template +static inline char* EmitCopyAtMost64(char* op, size_t offset, size_t len) { assert(len <= 64); assert(len >= 4); assert(offset < 65536); + assert(len_less_than_12 == (len < 12)); - if ((len < 12) && (offset < 2048)) { - size_t len_minus_4 = len - 4; - assert(len_minus_4 < 8); // Must fit in 3 bits - *op++ = COPY_1_BYTE_OFFSET + ((len_minus_4) << 2) + ((offset >> 8) << 5); + if (len_less_than_12 && SNAPPY_PREDICT_TRUE(offset < 2048)) { + // offset fits in 11 bits. The 3 highest go in the top of the first byte, + // and the rest go in the second byte. + *op++ = COPY_1_BYTE_OFFSET + ((len - 4) << 2) + ((offset >> 3) & 0xe0); *op++ = offset & 0xff; } else { - *op++ = COPY_2_BYTE_OFFSET + ((len-1) << 2); - LittleEndian::Store16(op, offset); - op += 2; + // Write 4 bytes, though we only care about 3 of them. The output buffer + // is required to have some slack, so the extra byte won't overrun it. 
+ uint32 u = COPY_2_BYTE_OFFSET + ((len - 1) << 2) + (offset << 8); + LittleEndian::Store32(op, u); + op += 3; } return op; } -static inline char* EmitCopy(char* op, size_t offset, int len) { - // Emit 64 byte copies but make sure to keep at least four bytes reserved - while (PREDICT_FALSE(len >= 68)) { - op = EmitCopyLessThan64(op, offset, 64); - len -= 64; - } +template +static inline char* EmitCopy(char* op, size_t offset, size_t len) { + assert(len_less_than_12 == (len < 12)); + if (len_less_than_12) { + return EmitCopyAtMost64(op, offset, len); + } else { + // A special case for len <= 64 might help, but so far measurements suggest + // it's in the noise. - // Emit an extra 60 byte copy if have too much data to fit in one copy - if (len > 64) { - op = EmitCopyLessThan64(op, offset, 60); - len -= 60; - } + // Emit 64 byte copies but make sure to keep at least four bytes reserved. + while (SNAPPY_PREDICT_FALSE(len >= 68)) { + op = EmitCopyAtMost64(op, offset, 64); + len -= 64; + } - // Emit remainder - op = EmitCopyLessThan64(op, offset, len); - return op; + // One or two copies will now finish the job. + if (len > 64) { + op = EmitCopyAtMost64(op, offset, 60); + len -= 60; + } + + // Emit remainder. + if (len < 12) { + op = EmitCopyAtMost64(op, offset, len); + } else { + op = EmitCopyAtMost64(op, offset, len); + } + return op; + } } - bool GetUncompressedLength(const char* start, size_t n, size_t* result) { uint32 v = 0; const char* limit = start + n; @@ -243,31 +438,45 @@ bool GetUncompressedLength(const char* start, size_t n, size_t* result) { } } +namespace { +uint32 CalculateTableSize(uint32 input_size) { + static_assert( + kMaxHashTableSize >= kMinHashTableSize, + "kMaxHashTableSize should be greater or equal to kMinHashTableSize."); + if (input_size > kMaxHashTableSize) { + return kMaxHashTableSize; + } + if (input_size < kMinHashTableSize) { + return kMinHashTableSize; + } + // This is equivalent to Log2Ceiling(input_size), assuming input_size > 1. 
+ // 2 << Log2Floor(x - 1) is equivalent to 1 << (1 + Log2Floor(x - 1)). + return 2u << Bits::Log2Floor(input_size - 1); +} +} // namespace + namespace internal { -uint16* WorkingMemory::GetHashTable(size_t input_size, int* table_size) { - // Use smaller hash table when input.size() is smaller, since we - // fill the table, incurring O(hash table size) overhead for - // compression, and if the input is short, we won't need that - // many hash table entries anyway. - assert(kMaxHashTableSize >= 256); - size_t htsize = 256; - while (htsize < kMaxHashTableSize && htsize < input_size) { - htsize <<= 1; - } +WorkingMemory::WorkingMemory(size_t input_size) { + const size_t max_fragment_size = std::min(input_size, kBlockSize); + const size_t table_size = CalculateTableSize(max_fragment_size); + size_ = table_size * sizeof(*table_) + max_fragment_size + + MaxCompressedLength(max_fragment_size); + mem_ = std::allocator().allocate(size_); + table_ = reinterpret_cast(mem_); + input_ = mem_ + table_size * sizeof(*table_); + output_ = input_ + max_fragment_size; +} - uint16* table; - if (htsize <= ARRAYSIZE(small_table_)) { - table = small_table_; - } else { - if (large_table_ == NULL) { - large_table_ = new uint16[kMaxHashTableSize]; - } - table = large_table_; - } +WorkingMemory::~WorkingMemory() { + std::allocator().deallocate(mem_, size_); +} +uint16* WorkingMemory::GetHashTable(size_t fragment_size, + int* table_size) const { + const size_t htsize = CalculateTableSize(fragment_size); + memset(table_, 0, htsize * sizeof(*table_)); *table_size = htsize; - memset(table, 0, htsize * sizeof(*table)); - return table; + return table_; } } // end namespace internal @@ -334,7 +543,7 @@ char* CompressFragment(const char* input, // "ip" is the input pointer, and "op" is the output pointer. 
const char* ip = input; assert(input_size <= kBlockSize); - assert((table_size & (table_size - 1)) == 0); // table must be power of two + assert((table_size & (table_size - 1)) == 0); // table must be power of two const int shift = 32 - Bits::Log2Floor(table_size); assert(static_cast(kuint32max >> shift) == table_size - 1); const char* ip_end = input + input_size; @@ -344,7 +553,7 @@ char* CompressFragment(const char* input, const char* next_emit = ip; const size_t kInputMarginBytes = 15; - if (PREDICT_TRUE(input_size >= kInputMarginBytes)) { + if (SNAPPY_PREDICT_TRUE(input_size >= kInputMarginBytes)) { const char* ip_limit = input + input_size - kInputMarginBytes; for (uint32 next_hash = Hash(++ip, shift); ; ) { @@ -385,7 +594,7 @@ char* CompressFragment(const char* input, uint32 bytes_between_hash_lookups = skip >> 5; skip += bytes_between_hash_lookups; next_ip = ip + bytes_between_hash_lookups; - if (PREDICT_FALSE(next_ip > ip_limit)) { + if (SNAPPY_PREDICT_FALSE(next_ip > ip_limit)) { goto emit_remainder; } next_hash = Hash(next_ip, shift); @@ -394,14 +603,14 @@ char* CompressFragment(const char* input, assert(candidate < ip); table[hash] = ip - base_ip; - } while (PREDICT_TRUE(UNALIGNED_LOAD32(ip) != - UNALIGNED_LOAD32(candidate))); + } while (SNAPPY_PREDICT_TRUE(UNALIGNED_LOAD32(ip) != + UNALIGNED_LOAD32(candidate))); // Step 2: A 4-byte match has been found. We'll later see if more // than 4 bytes match. But, prior to the match, input // bytes [next_emit, ip) are unmatched. Emit them as "literal bytes." assert(next_emit + 16 <= ip_end); - op = EmitLiteral(op, next_emit, ip - next_emit, true); + op = EmitLiteral(op, next_emit, ip - next_emit); // Step 3: Call EmitCopy, and then see if another EmitCopy could // be our next move. Repeat until we find no match for the @@ -418,19 +627,25 @@ char* CompressFragment(const char* input, // We have a 4-byte match at ip, and no need to emit any // "literal bytes" prior to ip. 
const char* base = ip; - int matched = 4 + FindMatchLength(candidate + 4, ip + 4, ip_end); + std::pair p = + FindMatchLength(candidate + 4, ip + 4, ip_end); + size_t matched = 4 + p.first; ip += matched; size_t offset = base - candidate; assert(0 == memcmp(base, candidate, matched)); - op = EmitCopy(op, offset, matched); - // We could immediately start working at ip now, but to improve - // compression we first update table[Hash(ip - 1, ...)]. - const char* insert_tail = ip - 1; + if (p.second) { + op = EmitCopy(op, offset, matched); + } else { + op = EmitCopy(op, offset, matched); + } next_emit = ip; - if (PREDICT_FALSE(ip >= ip_limit)) { + if (SNAPPY_PREDICT_FALSE(ip >= ip_limit)) { goto emit_remainder; } - input_bytes = GetEightBytesAt(insert_tail); + // We are now looking for a 4-byte match again. We read + // table[Hash(ip, shift)] for that. To improve compression, + // we also update table[Hash(ip - 1, shift)] and table[Hash(ip, shift)]. + input_bytes = GetEightBytesAt(ip - 1); uint32 prev_hash = HashBytes(GetUint32AtOffset(input_bytes, 0), shift); table[prev_hash] = ip - base_ip - 1; uint32 cur_hash = HashBytes(GetUint32AtOffset(input_bytes, 1), shift); @@ -447,13 +662,18 @@ char* CompressFragment(const char* input, emit_remainder: // Emit the remaining bytes as a literal if (next_emit < ip_end) { - op = EmitLiteral(op, next_emit, ip_end - next_emit, false); + op = EmitLiteral(op, next_emit, + ip_end - next_emit); } return op; } } // end namespace internal +// Called back at avery compression call to trace parameters and sizes. +static inline void Report(const char *algorithm, size_t compressed_size, + size_t uncompressed_size) {} + // Signature of output types needed by decompression code. 
// The decompression code is templatized on a type that obeys this // signature so that we do not pay virtual function call overhead in @@ -494,6 +714,28 @@ char* CompressFragment(const char* input, // bool TryFastAppend(const char* ip, size_t available, size_t length); // }; +static inline uint32 ExtractLowBytes(uint32 v, int n) { + assert(n >= 0); + assert(n <= 4); +#if SNAPPY_HAVE_BMI2 + return _bzhi_u32(v, 8 * n); +#else + // This needs to be wider than uint32 otherwise `mask << 32` will be + // undefined. + uint64 mask = 0xffffffff; + return v & ~(mask << (8 * n)); +#endif +} + +static inline bool LeftShiftOverflows(uint8 value, uint32 shift) { + assert(shift < 32); + static const uint8 masks[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // + 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe}; + return (value & masks[shift]) != 0; +} // Helper class for decompression class SnappyDecompressor { @@ -532,7 +774,7 @@ class SnappyDecompressor { } // Read the uncompressed length stored at the start of the compressed data. - // On succcess, stores the length in *result and returns true. + // On success, stores the length in *result and returns true. // On failure, returns false. bool ReadUncompressedLength(uint32* result) { assert(ip_ == NULL); // Must not have read anything yet @@ -547,7 +789,7 @@ class SnappyDecompressor { const unsigned char c = *(reinterpret_cast(ip)); reader_->Skip(1); uint32 val = c & 0x7f; - if (((val << shift) >> shift) != val) return false; + if (LeftShiftOverflows(static_cast(val), shift)) return false; *result |= val << shift; if (c < 128) { break; @@ -560,9 +802,27 @@ class SnappyDecompressor { // Process the next item found in the input. // Returns true if successful, false on error or end of input. 
template <class Writer> +#if defined(__GNUC__) && defined(__x86_64__) + __attribute__((aligned(32))) +#endif void DecompressAllTags(Writer* writer) { - const char* ip = ip_; + // In x86, pad the function body to start 16 bytes later. This function has + // a couple of hotspots that are highly sensitive to alignment: we have + // observed regressions by more than 20% in some metrics just by moving the + // exact same code to a different position in the benchmark binary. + // + // Putting this code on a 32-byte-aligned boundary + 16 bytes makes us hit + // the "lucky" case consistently. Unfortunately, this is a very brittle + // workaround, and future differences in code generation may reintroduce + // this regression. If you experience a big, difficult to explain, benchmark + // performance regression here, first try removing this hack. +#if defined(__GNUC__) && defined(__x86_64__) + // Two 8-byte "NOP DWORD ptr [EAX + EAX*1 + 00000000H]" instructions. + asm(".byte 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00"); + asm(".byte 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00"); +#endif + const char* ip = ip_; // We could have put this refill fragment only at the beginning of the loop. // However, duplicating it at the end of each branch gives the compiler more // scope to optimize the expression based on the local @@ -578,21 +838,34 @@ class SnappyDecompressor { for ( ;; ) { const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip++)); - if ((c & 0x3) == LITERAL) { + // Ratio of iterations that have LITERAL vs non-LITERAL for different + // inputs. 
+ // + // input LITERAL NON_LITERAL + // ----------------------------------- + // html|html4|cp 23% 77% + // urls 36% 64% + // jpg 47% 53% + // pdf 19% 81% + // txt[1-4] 25% 75% + // pb 24% 76% + // bin 24% 76% + if (SNAPPY_PREDICT_FALSE((c & 0x3) == LITERAL)) { size_t literal_length = (c >> 2) + 1u; if (writer->TryFastAppend(ip, ip_limit_ - ip, literal_length)) { assert(literal_length < 61); ip += literal_length; - // NOTE(user): There is no MAYBE_REFILL() here, as TryFastAppend() + // NOTE: There is no MAYBE_REFILL() here, as TryFastAppend() // will not return true unless there's already at least five spare // bytes in addition to the literal. continue; } - if (PREDICT_FALSE(literal_length >= 61)) { + if (SNAPPY_PREDICT_FALSE(literal_length >= 61)) { // Long literal. const size_t literal_length_length = literal_length - 60; literal_length = - (LittleEndian::Load32(ip) & wordmask[literal_length_length]) + 1; + ExtractLowBytes(LittleEndian::Load32(ip), literal_length_length) + + 1; ip += literal_length_length; } @@ -614,15 +887,16 @@ class SnappyDecompressor { ip += literal_length; MAYBE_REFILL(); } else { - const uint32 entry = char_table[c]; - const uint32 trailer = LittleEndian::Load32(ip) & wordmask[entry >> 11]; - const uint32 length = entry & 0xff; + const size_t entry = char_table[c]; + const size_t trailer = + ExtractLowBytes(LittleEndian::Load32(ip), entry >> 11); + const size_t length = entry & 0xff; ip += entry >> 11; // copy_offset/256 is encoded in bits 8..10. By just fetching // those bits, we get copy_offset (since the bit-field starts at // bit 8). 
- const uint32 copy_offset = entry & 0x700; + const size_t copy_offset = entry & 0x700; if (!writer->AppendFromSelf(copy_offset + trailer, length)) { return; } @@ -642,10 +916,8 @@ bool SnappyDecompressor::RefillTag() { size_t n; ip = reader_->Peek(&n); peeked_ = n; - if (n == 0) { - eof_ = true; - return false; - } + eof_ = (n == 0); + if (eof_) return false; ip_limit_ = ip + n; } @@ -670,7 +942,7 @@ bool SnappyDecompressor::RefillTag() { size_t length; const char* src = reader_->Peek(&length); if (length == 0) return false; - uint32 to_add = min(needed - nbuf, length); + uint32 to_add = std::min(needed - nbuf, length); memcpy(scratch_ + nbuf, src, to_add); nbuf += to_add; reader_->Skip(to_add); @@ -699,13 +971,18 @@ static bool InternalUncompress(Source* r, Writer* writer) { SnappyDecompressor decompressor(r); uint32 uncompressed_len = 0; if (!decompressor.ReadUncompressedLength(&uncompressed_len)) return false; - return InternalUncompressAllTags(&decompressor, writer, uncompressed_len); + + return InternalUncompressAllTags(&decompressor, writer, r->Available(), + uncompressed_len); } template static bool InternalUncompressAllTags(SnappyDecompressor* decompressor, Writer* writer, + uint32 compressed_len, uint32 uncompressed_len) { + Report("snappy_uncompress", compressed_len, uncompressed_len); + writer->SetExpectedLength(uncompressed_len); // Process the entire input @@ -722,21 +999,20 @@ bool GetUncompressedLength(Source* source, uint32* result) { size_t Compress(Source* reader, Sink* writer) { size_t written = 0; size_t N = reader->Available(); + const size_t uncompressed_size = N; char ulength[Varint::kMax32]; char* p = Varint::Encode32(ulength, N); writer->Append(ulength, p-ulength); written += (p - ulength); - internal::WorkingMemory wmem; - char* scratch = NULL; - char* scratch_output = NULL; + internal::WorkingMemory wmem(N); while (N > 0) { // Get next block to compress (without copying if possible) size_t fragment_size; const char* fragment = 
reader->Peek(&fragment_size); assert(fragment_size != 0); // premature end of input - const size_t num_to_read = min(N, kBlockSize); + const size_t num_to_read = std::min(N, kBlockSize); size_t bytes_read = fragment_size; size_t pending_advance = 0; @@ -745,19 +1021,13 @@ size_t Compress(Source* reader, Sink* writer) { pending_advance = num_to_read; fragment_size = num_to_read; } else { - // Read into scratch buffer - if (scratch == NULL) { - // If this is the last iteration, we want to allocate N bytes - // of space, otherwise the max possible kBlockSize space. - // num_to_read contains exactly the correct value - scratch = new char[num_to_read]; - } + char* scratch = wmem.GetScratchInput(); memcpy(scratch, fragment, bytes_read); reader->Skip(bytes_read); while (bytes_read < num_to_read) { fragment = reader->Peek(&fragment_size); - size_t n = min(fragment_size, num_to_read - bytes_read); + size_t n = std::min(fragment_size, num_to_read - bytes_read); memcpy(scratch + bytes_read, fragment, n); bytes_read += n; reader->Skip(n); @@ -777,16 +1047,13 @@ size_t Compress(Source* reader, Sink* writer) { // Need a scratch buffer for the output, in case the byte sink doesn't // have room for us directly. - if (scratch_output == NULL) { - scratch_output = new char[max_output]; - } else { - // Since we encode kBlockSize regions followed by a region - // which is <= kBlockSize in length, a previously allocated - // scratch_output[] region is big enough for this iteration. - } - char* dest = writer->GetAppendBuffer(max_output, scratch_output); - char* end = internal::CompressFragment(fragment, fragment_size, - dest, table, table_size); + + // Since we encode kBlockSize regions followed by a region + // which is <= kBlockSize in length, a previously allocated + // scratch_output[] region is big enough for this iteration. 
+ char* dest = writer->GetAppendBuffer(max_output, wmem.GetScratchOutput()); + char* end = internal::CompressFragment(fragment, fragment_size, dest, table, + table_size); writer->Append(dest, end - dest); written += (end - dest); @@ -794,8 +1061,7 @@ size_t Compress(Source* reader, Sink* writer) { reader->Skip(pending_advance); } - delete[] scratch; - delete[] scratch_output; + Report("snappy_compress", written, uncompressed_size); return written; } @@ -809,14 +1075,22 @@ size_t Compress(Source* reader, Sink* writer) { // Writer template argument to SnappyDecompressor::DecompressAllTags(). class SnappyIOVecWriter { private: + // output_iov_end_ is set to iov + count and used to determine when + // the end of the iovs is reached. + const struct iovec* output_iov_end_; + +#if !defined(NDEBUG) const struct iovec* output_iov_; - const size_t output_iov_count_; +#endif // !defined(NDEBUG) - // We are currently writing into output_iov_[curr_iov_index_]. - size_t curr_iov_index_; + // Current iov that is being written into. + const struct iovec* curr_iov_; - // Bytes written to output_iov_[curr_iov_index_] so far. - size_t curr_iov_written_; + // Pointer to current iov's write location. + char* curr_iov_output_; + + // Remaining bytes to write into curr_iov_output. + size_t curr_iov_remaining_; // Total bytes decompressed into output_iov_ so far. size_t total_written_; @@ -824,22 +1098,24 @@ class SnappyIOVecWriter { // Maximum number of bytes that will be decompressed into output_iov_. size_t output_limit_; - inline char* GetIOVecPointer(size_t index, size_t offset) { - return reinterpret_cast(output_iov_[index].iov_base) + - offset; + static inline char* GetIOVecPointer(const struct iovec* iov, size_t offset) { + return reinterpret_cast(iov->iov_base) + offset; } public: // Does not take ownership of iov. iov must be valid during the // entire lifetime of the SnappyIOVecWriter. 
inline SnappyIOVecWriter(const struct iovec* iov, size_t iov_count) - : output_iov_(iov), - output_iov_count_(iov_count), - curr_iov_index_(0), - curr_iov_written_(0), + : output_iov_end_(iov + iov_count), +#if !defined(NDEBUG) + output_iov_(iov), +#endif // !defined(NDEBUG) + curr_iov_(iov), + curr_iov_output_(iov_count ? reinterpret_cast(iov->iov_base) + : nullptr), + curr_iov_remaining_(iov_count ? iov->iov_len : 0), total_written_(0), - output_limit_(-1) { - } + output_limit_(-1) {} inline void SetExpectedLength(size_t len) { output_limit_ = len; @@ -854,23 +1130,25 @@ class SnappyIOVecWriter { return false; } + return AppendNoCheck(ip, len); + } + + inline bool AppendNoCheck(const char* ip, size_t len) { while (len > 0) { - assert(curr_iov_written_ <= output_iov_[curr_iov_index_].iov_len); - if (curr_iov_written_ >= output_iov_[curr_iov_index_].iov_len) { + if (curr_iov_remaining_ == 0) { // This iovec is full. Go to the next one. - if (curr_iov_index_ + 1 >= output_iov_count_) { + if (curr_iov_ + 1 >= output_iov_end_) { return false; } - curr_iov_written_ = 0; - ++curr_iov_index_; + ++curr_iov_; + curr_iov_output_ = reinterpret_cast(curr_iov_->iov_base); + curr_iov_remaining_ = curr_iov_->iov_len; } - const size_t to_write = std::min( - len, output_iov_[curr_iov_index_].iov_len - curr_iov_written_); - memcpy(GetIOVecPointer(curr_iov_index_, curr_iov_written_), - ip, - to_write); - curr_iov_written_ += to_write; + const size_t to_write = std::min(len, curr_iov_remaining_); + memcpy(curr_iov_output_, ip, to_write); + curr_iov_output_ += to_write; + curr_iov_remaining_ -= to_write; total_written_ += to_write; ip += to_write; len -= to_write; @@ -882,12 +1160,11 @@ class SnappyIOVecWriter { inline bool TryFastAppend(const char* ip, size_t available, size_t len) { const size_t space_left = output_limit_ - total_written_; if (len <= 16 && available >= 16 + kMaximumTagLength && space_left >= 16 && - output_iov_[curr_iov_index_].iov_len - curr_iov_written_ >= 16) { + 
curr_iov_remaining_ >= 16) { // Fast path, used for the majority (about 95%) of invocations. - char* ptr = GetIOVecPointer(curr_iov_index_, curr_iov_written_); - UnalignedCopy64(ip, ptr); - UnalignedCopy64(ip + 8, ptr + 8); - curr_iov_written_ += len; + UnalignedCopy128(ip, curr_iov_output_); + curr_iov_output_ += len; + curr_iov_remaining_ -= len; total_written_ += len; return true; } @@ -896,7 +1173,9 @@ class SnappyIOVecWriter { } inline bool AppendFromSelf(size_t offset, size_t len) { - if (offset > total_written_ || offset == 0) { + // See SnappyArrayWriter::AppendFromSelf for an explanation of + // the "offset - 1u" trick. + if (offset - 1u >= total_written_) { return false; } const size_t space_left = output_limit_ - total_written_; @@ -905,8 +1184,8 @@ class SnappyIOVecWriter { } // Locate the iovec from which we need to start the copy. - size_t from_iov_index = curr_iov_index_; - size_t from_iov_offset = curr_iov_written_; + const iovec* from_iov = curr_iov_; + size_t from_iov_offset = curr_iov_->iov_len - curr_iov_remaining_; while (offset > 0) { if (from_iov_offset >= offset) { from_iov_offset -= offset; @@ -914,46 +1193,47 @@ class SnappyIOVecWriter { } offset -= from_iov_offset; - assert(from_iov_index > 0); - --from_iov_index; - from_iov_offset = output_iov_[from_iov_index].iov_len; + --from_iov; +#if !defined(NDEBUG) + assert(from_iov >= output_iov_); +#endif // !defined(NDEBUG) + from_iov_offset = from_iov->iov_len; } // Copy bytes starting from the iovec pointed to by from_iov_index to // the current iovec. 
while (len > 0) { - assert(from_iov_index <= curr_iov_index_); - if (from_iov_index != curr_iov_index_) { - const size_t to_copy = std::min( - output_iov_[from_iov_index].iov_len - from_iov_offset, - len); - Append(GetIOVecPointer(from_iov_index, from_iov_offset), to_copy); + assert(from_iov <= curr_iov_); + if (from_iov != curr_iov_) { + const size_t to_copy = + std::min(from_iov->iov_len - from_iov_offset, len); + AppendNoCheck(GetIOVecPointer(from_iov, from_iov_offset), to_copy); len -= to_copy; if (len > 0) { - ++from_iov_index; + ++from_iov; from_iov_offset = 0; } } else { - assert(curr_iov_written_ <= output_iov_[curr_iov_index_].iov_len); - size_t to_copy = std::min(output_iov_[curr_iov_index_].iov_len - - curr_iov_written_, - len); + size_t to_copy = curr_iov_remaining_; if (to_copy == 0) { // This iovec is full. Go to the next one. - if (curr_iov_index_ + 1 >= output_iov_count_) { + if (curr_iov_ + 1 >= output_iov_end_) { return false; } - ++curr_iov_index_; - curr_iov_written_ = 0; + ++curr_iov_; + curr_iov_output_ = reinterpret_cast(curr_iov_->iov_base); + curr_iov_remaining_ = curr_iov_->iov_len; continue; } if (to_copy > len) { to_copy = len; } - IncrementalCopy(GetIOVecPointer(from_iov_index, from_iov_offset), - GetIOVecPointer(curr_iov_index_, curr_iov_written_), - to_copy); - curr_iov_written_ += to_copy; + + IncrementalCopy(GetIOVecPointer(from_iov, from_iov_offset), + curr_iov_output_, curr_iov_output_ + to_copy, + curr_iov_output_ + curr_iov_remaining_); + curr_iov_output_ += to_copy; + curr_iov_remaining_ -= to_copy; from_iov_offset += to_copy; total_written_ += to_copy; len -= to_copy; @@ -1022,8 +1302,7 @@ class SnappyArrayWriter { const size_t space_left = op_limit_ - op; if (len <= 16 && available >= 16 + kMaximumTagLength && space_left >= 16) { // Fast path, used for the majority (about 95%) of invocations. 
- UnalignedCopy64(ip, op); - UnalignedCopy64(ip + 8, op + 8); + UnalignedCopy128(ip, op); op_ = op + len; return true; } else { @@ -1032,8 +1311,7 @@ class SnappyArrayWriter { } inline bool AppendFromSelf(size_t offset, size_t len) { - char* op = op_; - const size_t space_left = op_limit_ - op; + char* const op_end = op_ + len; // Check if we try to append from before the start of the buffer. // Normally this would just be a check for "produced < offset", @@ -1042,30 +1320,13 @@ class SnappyArrayWriter { // to a very big number. This is convenient, as offset==0 is another // invalid case that we also want to catch, so that we do not go // into an infinite loop. - assert(op >= base_); - size_t produced = op - base_; - if (produced <= offset - 1u) { - return false; - } - if (len <= 16 && offset >= 8 && space_left >= 16) { - // Fast path, used for the majority (70-80%) of dynamic invocations. - UnalignedCopy64(op - offset, op); - UnalignedCopy64(op - offset + 8, op + 8); - } else { - if (space_left >= len + kMaxIncrementCopyOverflow) { - IncrementalCopyFastPath(op - offset, op, len); - } else { - if (space_left < len) { - return false; - } - IncrementalCopy(op - offset, op, len); - } - } + if (Produced() <= offset - 1u || op_end > op_limit_) return false; + op_ = IncrementalCopy(op_ - offset, op_, op_end, op_limit_); - op_ = op + len; return true; } inline size_t Produced() const { + assert(op_ >= base_); return op_ - base_; } inline void Flush() {} @@ -1081,7 +1342,7 @@ bool RawUncompress(Source* compressed, char* uncompressed) { return InternalUncompress(compressed, &output); } -bool Uncompress(const char* compressed, size_t n, string* uncompressed) { +bool Uncompress(const char* compressed, size_t n, std::string* uncompressed) { size_t ulength; if (!GetUncompressedLength(compressed, n, &ulength)) { return false; @@ -1149,9 +1410,10 @@ void RawCompress(const char* input, *compressed_length = (writer.CurrentDestination() - compressed); } -size_t Compress(const char* 
input, size_t input_length, string* compressed) { +size_t Compress(const char* input, size_t input_length, + std::string* compressed) { // Pre-grow the buffer to the max length of the compressed output - compressed->resize(MaxCompressedLength(input_length)); + STLStringResizeUninitialized(compressed, MaxCompressedLength(input_length)); size_t compressed_length; RawCompress(input, input_length, string_as_array(compressed), @@ -1174,7 +1436,7 @@ class SnappyScatteredWriter { // We need random access into the data generated so far. Therefore // we keep track of all of the generated data as an array of blocks. // All of the blocks except the last have length kBlockSize. - vector blocks_; + std::vector blocks_; size_t expected_; // Total size of all fully generated blocks so far @@ -1233,8 +1495,7 @@ class SnappyScatteredWriter { if (length <= 16 && available >= 16 + kMaximumTagLength && space_left >= 16) { // Fast path, used for the majority (about 95%) of invocations. - UNALIGNED_STORE64(op, UNALIGNED_LOAD64(ip)); - UNALIGNED_STORE64(op + 8, UNALIGNED_LOAD64(ip + 8)); + UnalignedCopy128(ip, op); op_ptr_ = op + length; return true; } else { @@ -1243,16 +1504,14 @@ class SnappyScatteredWriter { } inline bool AppendFromSelf(size_t offset, size_t len) { + char* const op_end = op_ptr_ + len; // See SnappyArrayWriter::AppendFromSelf for an explanation of // the "offset - 1u" trick. - if (offset - 1u < op_ptr_ - op_base_) { - const size_t space_left = op_limit_ - op_ptr_; - if (space_left >= len + kMaxIncrementCopyOverflow) { - // Fast path: src and dst in current block. - IncrementalCopyFastPath(op_ptr_ - offset, op_ptr_, len); - op_ptr_ += len; - return true; - } + if (SNAPPY_PREDICT_TRUE(offset - 1u < op_ptr_ - op_base_ && + op_end <= op_limit_)) { + // Fast path: src and dst in current block. 
+ op_ptr_ = IncrementalCopy(op_ptr_ - offset, op_ptr_, op_end, op_limit_); + return true; } return SlowAppendFromSelf(offset, len); } @@ -1280,7 +1539,7 @@ bool SnappyScatteredWriter::SlowAppend(const char* ip, size_t len) { } // Make new block - size_t bsize = min(kBlockSize, expected_ - full_size_); + size_t bsize = std::min(kBlockSize, expected_ - full_size_); op_base_ = allocator_.Allocate(bsize); op_ptr_ = op_base_; op_limit_ = op_base_ + bsize; @@ -1337,7 +1596,7 @@ class SnappySinkAllocator { size_t size_written = 0; size_t block_size; for (int i = 0; i < blocks_.size(); ++i) { - block_size = min(blocks_[i].size, size - size_written); + block_size = std::min(blocks_[i].size, size - size_written); dest_->AppendAndTakeOwnership(blocks_[i].data, block_size, &SnappySinkAllocator::Deleter, NULL); size_written += block_size; @@ -1357,7 +1616,7 @@ class SnappySinkAllocator { } Sink* dest_; - vector blocks_; + std::vector blocks_; // Note: copying this object is allowed }; @@ -1382,19 +1641,21 @@ bool Uncompress(Source* compressed, Sink* uncompressed) { char* buf = uncompressed->GetAppendBufferVariable( 1, uncompressed_len, &c, 1, &allocated_size); + const size_t compressed_len = compressed->Available(); // If we can get a flat buffer, then use it, otherwise do block by block // uncompression if (allocated_size >= uncompressed_len) { SnappyArrayWriter writer(buf); - bool result = InternalUncompressAllTags( - &decompressor, &writer, uncompressed_len); + bool result = InternalUncompressAllTags(&decompressor, &writer, + compressed_len, uncompressed_len); uncompressed->Append(buf, writer.Produced()); return result; } else { SnappySinkAllocator allocator(uncompressed); SnappyScatteredWriter writer(allocator); - return InternalUncompressAllTags(&decompressor, &writer, uncompressed_len); + return InternalUncompressAllTags(&decompressor, &writer, compressed_len, + uncompressed_len); } } -} // end namespace snappy +} // namespace snappy diff --git 
a/other-licenses/snappy/src/snappy.h b/other-licenses/snappy/src/snappy.h index 4568db890d64..e9805bfb7de5 100644 --- a/other-licenses/snappy/src/snappy.h +++ b/other-licenses/snappy/src/snappy.h @@ -39,7 +39,7 @@ #ifndef THIRD_PARTY_SNAPPY_SNAPPY_H__ #define THIRD_PARTY_SNAPPY_SNAPPY_H__ -#include +#include #include #include "snappy-stubs-public.h" @@ -69,11 +69,12 @@ namespace snappy { // Higher-level string based routines (should be sufficient for most users) // ------------------------------------------------------------------------ - // Sets "*output" to the compressed version of "input[0,input_length-1]". - // Original contents of *output are lost. + // Sets "*compressed" to the compressed version of "input[0,input_length-1]". + // Original contents of *compressed are lost. // - // REQUIRES: "input[]" is not an alias of "*output". - size_t Compress(const char* input, size_t input_length, string* output); + // REQUIRES: "input[]" is not an alias of "*compressed". + size_t Compress(const char* input, size_t input_length, + std::string* compressed); // Decompresses "compressed[0,compressed_length-1]" to "*uncompressed". // Original contents of "*uncompressed" are lost. @@ -82,7 +83,7 @@ namespace snappy { // // returns false if the message is corrupted and could not be decompressed bool Uncompress(const char* compressed, size_t compressed_length, - string* uncompressed); + std::string* uncompressed); // Decompresses "compressed" to "*uncompressed". // @@ -193,11 +194,14 @@ namespace snappy { // Note that there might be older data around that is compressed with larger // block sizes, so the decompression code should not rely on the // non-existence of long backreferences. 
- static const int kBlockLog = 16; - static const size_t kBlockSize = 1 << kBlockLog; + static constexpr int kBlockLog = 16; + static constexpr size_t kBlockSize = 1 << kBlockLog; - static const int kMaxHashTableBits = 14; - static const size_t kMaxHashTableSize = 1 << kMaxHashTableBits; + static constexpr int kMinHashTableBits = 8; + static constexpr size_t kMinHashTableSize = 1 << kMinHashTableBits; + + static constexpr int kMaxHashTableBits = 14; + static constexpr size_t kMaxHashTableSize = 1 << kMaxHashTableBits; } // end namespace snappy #endif // THIRD_PARTY_SNAPPY_SNAPPY_H__ diff --git a/other-licenses/snappy/src/snappy_compress_fuzzer.cc b/other-licenses/snappy/src/snappy_compress_fuzzer.cc new file mode 100644 index 000000000000..1d0119e184d5 --- /dev/null +++ b/other-licenses/snappy/src/snappy_compress_fuzzer.cc @@ -0,0 +1,59 @@ +// Copyright 2019 Google Inc. All Rights Reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// libFuzzer harness for fuzzing snappy compression code. + +#include +#include +#include +#include + +#include "snappy.h" + +// Entry point for LibFuzzer. +extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { + std::string input(reinterpret_cast(data), size); + + std::string compressed; + size_t compressed_size = + snappy::Compress(input.data(), input.size(), &compressed); + + (void)compressed_size; // Variable only used in debug builds. + assert(compressed_size == compressed.size()); + assert(compressed.size() <= snappy::MaxCompressedLength(input.size())); + assert(snappy::IsValidCompressedBuffer(compressed.data(), compressed.size())); + + std::string uncompressed_after_compress; + bool uncompress_succeeded = snappy::Uncompress( + compressed.data(), compressed.size(), &uncompressed_after_compress); + + (void)uncompress_succeeded; // Variable only used in debug builds. + assert(uncompress_succeeded); + assert(input == uncompressed_after_compress); + return 0; +} diff --git a/other-licenses/snappy/src/snappy_uncompress_fuzzer.cc b/other-licenses/snappy/src/snappy_uncompress_fuzzer.cc new file mode 100644 index 000000000000..8071c00eea71 --- /dev/null +++ b/other-licenses/snappy/src/snappy_uncompress_fuzzer.cc @@ -0,0 +1,57 @@ +// Copyright 2019 Google Inc. All Rights Reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// libFuzzer harness for fuzzing snappy's decompression code. + +#include +#include +#include +#include + +#include "snappy.h" + +// Entry point for LibFuzzer. +extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { + std::string input(reinterpret_cast(data), size); + + // Avoid self-crafted decompression bombs. 
+ size_t uncompressed_size; + constexpr size_t kMaxUncompressedSize = 1 << 20; + bool get_uncompressed_length_succeeded = snappy::GetUncompressedLength( + input.data(), input.size(), &uncompressed_size); + if (!get_uncompressed_length_succeeded || + (uncompressed_size > kMaxUncompressedSize)) { + return 0; + } + + std::string uncompressed; + // The return value of snappy::Uncompress() is ignored because decompression + // will fail on invalid inputs. + snappy::Uncompress(input.data(), input.size(), &uncompressed); + return 0; +} diff --git a/other-licenses/snappy/src/snappy_unittest.cc b/other-licenses/snappy/src/snappy_unittest.cc index 65ac16aaa4e2..37159c32d40e 100644 --- a/other-licenses/snappy/src/snappy_unittest.cc +++ b/other-licenses/snappy/src/snappy_unittest.cc @@ -29,9 +29,10 @@ #include #include - #include +#include #include +#include #include #include "snappy.h" @@ -50,13 +51,6 @@ DEFINE_bool(zlib, false, "Run zlib compression (http://www.zlib.net)"); DEFINE_bool(lzo, false, "Run LZO compression (http://www.oberhumer.com/opensource/lzo/)"); -DEFINE_bool(quicklz, false, - "Run quickLZ compression (http://www.quicklz.com/)"); -DEFINE_bool(liblzf, false, - "Run libLZF compression " - "(http://www.goof.com/pcg/marc/liblzf.html)"); -DEFINE_bool(fastlz, false, - "Run FastLZ compression (http://www.fastlz.org/"); DEFINE_bool(snappy, true, "Run snappy compression"); DEFINE_bool(write_compressed, false, @@ -69,8 +63,7 @@ DEFINE_bool(snappy_dump_decompression_table, false, namespace snappy { - -#ifdef HAVE_FUNC_MMAP +#if defined(HAVE_FUNC_MMAP) && defined(HAVE_FUNC_SYSCONF) // To test against code that reads beyond its input, this class copies a // string to a newly allocated group of pages, the last of which @@ -80,8 +73,8 @@ namespace snappy { // be able to read previously allocated memory while doing heap allocations. 
class DataEndingAtUnreadablePage { public: - explicit DataEndingAtUnreadablePage(const string& s) { - const size_t page_size = getpagesize(); + explicit DataEndingAtUnreadablePage(const std::string& s) { + const size_t page_size = sysconf(_SC_PAGESIZE); const size_t size = s.size(); // Round up space for string to a multiple of page_size. size_t space_for_string = (size + page_size - 1) & ~(page_size - 1); @@ -99,8 +92,9 @@ class DataEndingAtUnreadablePage { } ~DataEndingAtUnreadablePage() { + const size_t page_size = sysconf(_SC_PAGESIZE); // Undo the mprotect. - CHECK_EQ(0, mprotect(protected_page_, getpagesize(), PROT_READ|PROT_WRITE)); + CHECK_EQ(0, mprotect(protected_page_, page_size, PROT_READ|PROT_WRITE)); CHECK_EQ(0, munmap(mem_, alloc_size_)); } @@ -115,19 +109,19 @@ class DataEndingAtUnreadablePage { size_t size_; }; -#else // HAVE_FUNC_MMAP +#else // defined(HAVE_FUNC_MMAP) && defined(HAVE_FUNC_SYSCONF) // Fallback for systems without mmap. -typedef string DataEndingAtUnreadablePage; +using DataEndingAtUnreadablePage = std::string; #endif enum CompressorType { - ZLIB, LZO, LIBLZF, QUICKLZ, FASTLZ, SNAPPY + ZLIB, LZO, SNAPPY }; const char* names[] = { - "ZLIB", "LZO", "LIBLZF", "QUICKLZ", "FASTLZ", "SNAPPY" + "ZLIB", "LZO", "SNAPPY" }; static size_t MinimumRequiredOutputSpace(size_t input_size, @@ -143,21 +137,6 @@ static size_t MinimumRequiredOutputSpace(size_t input_size, return input_size + input_size/64 + 16 + 3; #endif // LZO_VERSION -#ifdef LZF_VERSION - case LIBLZF: - return input_size; -#endif // LZF_VERSION - -#ifdef QLZ_VERSION_MAJOR - case QUICKLZ: - return input_size + 36000; // 36000 is used for scratch. 
-#endif // QLZ_VERSION_MAJOR - -#ifdef FASTLZ_VERSION - case FASTLZ: - return max(static_cast(ceil(input_size * 1.05)), 66); -#endif // FASTLZ_VERSION - case SNAPPY: return snappy::MaxCompressedLength(input_size); @@ -175,7 +154,7 @@ static size_t MinimumRequiredOutputSpace(size_t input_size, // "compressed" must be preinitialized to at least MinCompressbufSize(comp) // number of bytes, and may contain junk bytes at the end after return. static bool Compress(const char* input, size_t input_size, CompressorType comp, - string* compressed, bool compressed_is_preallocated) { + std::string* compressed, bool compressed_is_preallocated) { if (!compressed_is_preallocated) { compressed->resize(MinimumRequiredOutputSpace(input_size, comp)); } @@ -217,58 +196,6 @@ static bool Compress(const char* input, size_t input_size, CompressorType comp, } #endif // LZO_VERSION -#ifdef LZF_VERSION - case LIBLZF: { - int destlen = lzf_compress(input, - input_size, - string_as_array(compressed), - input_size); - if (destlen == 0) { - // lzf *can* cause lots of blowup when compressing, so they - // recommend to limit outsize to insize, and just not compress - // if it's bigger. Ideally, we'd just swap input and output. - compressed->assign(input, input_size); - destlen = input_size; - } - if (!compressed_is_preallocated) { - compressed->resize(destlen); - } - break; - } -#endif // LZF_VERSION - -#ifdef QLZ_VERSION_MAJOR - case QUICKLZ: { - qlz_state_compress *state_compress = new qlz_state_compress; - int destlen = qlz_compress(input, - string_as_array(compressed), - input_size, - state_compress); - delete state_compress; - CHECK_NE(0, destlen); - if (!compressed_is_preallocated) { - compressed->resize(destlen); - } - break; - } -#endif // QLZ_VERSION_MAJOR - -#ifdef FASTLZ_VERSION - case FASTLZ: { - // Use level 1 compression since we mostly care about speed. 
- int destlen = fastlz_compress_level( - 1, - input, - input_size, - string_as_array(compressed)); - if (!compressed_is_preallocated) { - compressed->resize(destlen); - } - CHECK_NE(destlen, 0); - break; - } -#endif // FASTLZ_VERSION - case SNAPPY: { size_t destlen; snappy::RawCompress(input, input_size, @@ -288,8 +215,8 @@ static bool Compress(const char* input, size_t input_size, CompressorType comp, return true; } -static bool Uncompress(const string& compressed, CompressorType comp, - int size, string* output) { +static bool Uncompress(const std::string& compressed, CompressorType comp, + int size, std::string* output) { switch (comp) { #ifdef ZLIB_VERSION case ZLIB: { @@ -323,49 +250,6 @@ static bool Uncompress(const string& compressed, CompressorType comp, } #endif // LZO_VERSION -#ifdef LZF_VERSION - case LIBLZF: { - output->resize(size); - int destlen = lzf_decompress(compressed.data(), - compressed.size(), - string_as_array(output), - output->size()); - if (destlen == 0) { - // This error probably means we had decided not to compress, - // and thus have stored input in output directly. 
- output->assign(compressed.data(), compressed.size()); - destlen = compressed.size(); - } - CHECK_EQ(destlen, size); - break; - } -#endif // LZF_VERSION - -#ifdef QLZ_VERSION_MAJOR - case QUICKLZ: { - output->resize(size); - qlz_state_decompress *state_decompress = new qlz_state_decompress; - int destlen = qlz_decompress(compressed.data(), - string_as_array(output), - state_decompress); - delete state_decompress; - CHECK_EQ(destlen, size); - break; - } -#endif // QLZ_VERSION_MAJOR - -#ifdef FASTLZ_VERSION - case FASTLZ: { - output->resize(size); - int destlen = fastlz_decompress(compressed.data(), - compressed.length(), - string_as_array(output), - size); - CHECK_EQ(destlen, size); - break; - } -#endif // FASTLZ_VERSION - case SNAPPY: { snappy::RawUncompress(compressed.data(), compressed.size(), string_as_array(output)); @@ -393,13 +277,13 @@ static void Measure(const char* data, { // Chop the input into blocks int num_blocks = (length + block_size - 1) / block_size; - vector input(num_blocks); - vector input_length(num_blocks); - vector compressed(num_blocks); - vector output(num_blocks); + std::vector input(num_blocks); + std::vector input_length(num_blocks); + std::vector compressed(num_blocks); + std::vector output(num_blocks); for (int b = 0; b < num_blocks; b++) { int input_start = b * block_size; - int input_limit = min((b+1)*block_size, length); + int input_limit = std::min((b+1)*block_size, length); input[b] = data+input_start; input_length[b] = input_limit-input_start; @@ -454,29 +338,28 @@ static void Measure(const char* data, } } - sort(ctime, ctime + kRuns); - sort(utime, utime + kRuns); + std::sort(ctime, ctime + kRuns); + std::sort(utime, utime + kRuns); const int med = kRuns/2; float comp_rate = (length / ctime[med]) * repeats / 1048576.0; float uncomp_rate = (length / utime[med]) * repeats / 1048576.0; - string x = names[comp]; + std::string x = names[comp]; x += ":"; - string urate = (uncomp_rate >= 0) - ? 
StringPrintf("%.1f", uncomp_rate) - : string("?"); + std::string urate = (uncomp_rate >= 0) ? StrFormat("%.1f", uncomp_rate) + : std::string("?"); printf("%-7s [b %dM] bytes %6d -> %6d %4.1f%% " "comp %5.1f MB/s uncomp %5s MB/s\n", x.c_str(), block_size/(1<<20), static_cast(length), static_cast(compressed_size), - (compressed_size * 100.0) / max(1, length), + (compressed_size * 100.0) / std::max(1, length), comp_rate, urate.c_str()); } -static int VerifyString(const string& input) { - string compressed; +static int VerifyString(const std::string& input) { + std::string compressed; DataEndingAtUnreadablePage i(input); const size_t written = snappy::Compress(i.data(), i.size(), &compressed); CHECK_EQ(written, compressed.size()); @@ -484,15 +367,15 @@ static int VerifyString(const string& input) { snappy::MaxCompressedLength(input.size())); CHECK(snappy::IsValidCompressedBuffer(compressed.data(), compressed.size())); - string uncompressed; + std::string uncompressed; DataEndingAtUnreadablePage c(compressed); CHECK(snappy::Uncompress(c.data(), c.size(), &uncompressed)); CHECK_EQ(uncompressed, input); return uncompressed.size(); } -static void VerifyStringSink(const string& input) { - string compressed; +static void VerifyStringSink(const std::string& input) { + std::string compressed; DataEndingAtUnreadablePage i(input); const size_t written = snappy::Compress(i.data(), i.size(), &compressed); CHECK_EQ(written, compressed.size()); @@ -500,7 +383,7 @@ static void VerifyStringSink(const string& input) { snappy::MaxCompressedLength(input.size())); CHECK(snappy::IsValidCompressedBuffer(compressed.data(), compressed.size())); - string uncompressed; + std::string uncompressed; uncompressed.resize(input.size()); snappy::UncheckedByteArraySink sink(string_as_array(&uncompressed)); DataEndingAtUnreadablePage c(compressed); @@ -509,8 +392,8 @@ static void VerifyStringSink(const string& input) { CHECK_EQ(uncompressed, input); } -static void VerifyIOVec(const string& input) { - 
string compressed; +static void VerifyIOVec(const std::string& input) { + std::string compressed; DataEndingAtUnreadablePage i(input); const size_t written = snappy::Compress(i.data(), i.size(), &compressed); CHECK_EQ(written, compressed.size()); @@ -521,23 +404,28 @@ static void VerifyIOVec(const string& input) { // Try uncompressing into an iovec containing a random number of entries // ranging from 1 to 10. char* buf = new char[input.size()]; - ACMRandom rnd(input.size()); - size_t num = rnd.Next() % 10 + 1; + std::minstd_rand0 rng(input.size()); + std::uniform_int_distribution uniform_1_to_10(1, 10); + size_t num = uniform_1_to_10(rng); if (input.size() < num) { num = input.size(); } struct iovec* iov = new iovec[num]; int used_so_far = 0; + std::bernoulli_distribution one_in_five(1.0 / 5); for (size_t i = 0; i < num; ++i) { + assert(used_so_far < input.size()); iov[i].iov_base = buf + used_so_far; if (i == num - 1) { iov[i].iov_len = input.size() - used_so_far; } else { // Randomly choose to insert a 0 byte entry. - if (rnd.OneIn(5)) { + if (one_in_five(rng)) { iov[i].iov_len = 0; } else { - iov[i].iov_len = rnd.Uniform(input.size()); + std::uniform_int_distribution uniform_not_used_so_far( + 0, input.size() - used_so_far - 1); + iov[i].iov_len = uniform_not_used_so_far(rng); } } used_so_far += iov[i].iov_len; @@ -551,22 +439,22 @@ static void VerifyIOVec(const string& input) { // Test that data compressed by a compressor that does not // obey block sizes is uncompressed properly. -static void VerifyNonBlockedCompression(const string& input) { +static void VerifyNonBlockedCompression(const std::string& input) { if (input.length() > snappy::kBlockSize) { // We cannot test larger blocks than the maximum block size, obviously. 
return; } - string prefix; + std::string prefix; Varint::Append32(&prefix, input.size()); // Setup compression table - snappy::internal::WorkingMemory wmem; + snappy::internal::WorkingMemory wmem(input.size()); int table_size; uint16* table = wmem.GetHashTable(input.size(), &table_size); // Compress entire input in one shot - string compressed; + std::string compressed; compressed += prefix; compressed.resize(prefix.size()+snappy::MaxCompressedLength(input.size())); char* dest = string_as_array(&compressed) + prefix.size(); @@ -574,13 +462,13 @@ static void VerifyNonBlockedCompression(const string& input) { dest, table, table_size); compressed.resize(end - compressed.data()); - // Uncompress into string - string uncomp_str; + // Uncompress into std::string + std::string uncomp_str; CHECK(snappy::Uncompress(compressed.data(), compressed.size(), &uncomp_str)); CHECK_EQ(uncomp_str, input); // Uncompress using source/sink - string uncomp_str2; + std::string uncomp_str2; uncomp_str2.resize(input.size()); snappy::UncheckedByteArraySink sink(string_as_array(&uncomp_str2)); snappy::ByteArraySource source(compressed.data(), compressed.size()); @@ -592,28 +480,28 @@ static void VerifyNonBlockedCompression(const string& input) { static const int kNumBlocks = 10; struct iovec vec[kNumBlocks]; const int block_size = 1 + input.size() / kNumBlocks; - string iovec_data(block_size * kNumBlocks, 'x'); + std::string iovec_data(block_size * kNumBlocks, 'x'); for (int i = 0; i < kNumBlocks; i++) { vec[i].iov_base = string_as_array(&iovec_data) + i * block_size; vec[i].iov_len = block_size; } CHECK(snappy::RawUncompressToIOVec(compressed.data(), compressed.size(), vec, kNumBlocks)); - CHECK_EQ(string(iovec_data.data(), input.size()), input); + CHECK_EQ(std::string(iovec_data.data(), input.size()), input); } } // Expand the input so that it is at least K times as big as block size -static string Expand(const string& input) { +static std::string Expand(const std::string& input) { static 
const int K = 3; - string data = input; + std::string data = input; while (data.size() < K * snappy::kBlockSize) { data += input; } return data; } -static int Verify(const string& input) { +static int Verify(const std::string& input) { VLOG(1) << "Verifying input of size " << input.size(); // Compress using string based routines @@ -625,7 +513,7 @@ static int Verify(const string& input) { VerifyNonBlockedCompression(input); VerifyIOVec(input); if (!input.empty()) { - const string expanded = Expand(input); + const std::string expanded = Expand(input); VerifyNonBlockedCompression(expanded); VerifyIOVec(input); } @@ -633,21 +521,20 @@ static int Verify(const string& input) { return result; } - -static bool IsValidCompressedBuffer(const string& c) { +static bool IsValidCompressedBuffer(const std::string& c) { return snappy::IsValidCompressedBuffer(c.data(), c.size()); } -static bool Uncompress(const string& c, string* u) { +static bool Uncompress(const std::string& c, std::string* u) { return snappy::Uncompress(c.data(), c.size(), u); } // This test checks to ensure that snappy doesn't coredump if it gets // corrupted data. TEST(CorruptedTest, VerifyCorrupted) { - string source = "making sure we don't crash with corrupted input"; + std::string source = "making sure we don't crash with corrupted input"; VLOG(1) << source; - string dest; - string uncmp; + std::string dest; + std::string uncmp; snappy::Compress(source.data(), source.size(), &dest); // Mess around with the data. It's hard to simulate all possible @@ -694,9 +581,9 @@ TEST(CorruptedTest, VerifyCorrupted) { // try reading stuff in from a bad file. 
for (int i = 1; i <= 3; ++i) { - string data = ReadTestDataFile(StringPrintf("baddata%d.snappy", i).c_str(), - 0); - string uncmp; + std::string data = + ReadTestDataFile(StrFormat("baddata%d.snappy", i).c_str(), 0); + std::string uncmp; // check that we don't return a crazy length size_t ulen; CHECK(!snappy::GetUncompressedLength(data.data(), data.size(), &ulen) @@ -714,7 +601,7 @@ TEST(CorruptedTest, VerifyCorrupted) { // These mirror the compression code in snappy.cc, but are copied // here so that we can bypass some limitations in the how snappy.cc // invokes these routines. -static void AppendLiteral(string* dst, const string& literal) { +static void AppendLiteral(std::string* dst, const std::string& literal) { if (literal.empty()) return; int n = literal.size() - 1; if (n < 60) { @@ -729,12 +616,12 @@ static void AppendLiteral(string* dst, const string& literal) { n >>= 8; } dst->push_back(0 | ((59+count) << 2)); - *dst += string(number, count); + *dst += std::string(number, count); } *dst += literal; } -static void AppendCopy(string* dst, int offset, int length) { +static void AppendCopy(std::string* dst, int offset, int length) { while (length > 0) { // Figure out how much to copy in one shot int to_copy; @@ -771,51 +658,67 @@ TEST(Snappy, SimpleTests) { Verify("ab"); Verify("abc"); - Verify("aaaaaaa" + string(16, 'b') + string("aaaaa") + "abc"); - Verify("aaaaaaa" + string(256, 'b') + string("aaaaa") + "abc"); - Verify("aaaaaaa" + string(2047, 'b') + string("aaaaa") + "abc"); - Verify("aaaaaaa" + string(65536, 'b') + string("aaaaa") + "abc"); - Verify("abcaaaaaaa" + string(65536, 'b') + string("aaaaa") + "abc"); + Verify("aaaaaaa" + std::string(16, 'b') + std::string("aaaaa") + "abc"); + Verify("aaaaaaa" + std::string(256, 'b') + std::string("aaaaa") + "abc"); + Verify("aaaaaaa" + std::string(2047, 'b') + std::string("aaaaa") + "abc"); + Verify("aaaaaaa" + std::string(65536, 'b') + std::string("aaaaa") + "abc"); + Verify("abcaaaaaaa" + std::string(65536, 
'b') + std::string("aaaaa") + "abc"); } // Verify max blowup (lots of four-byte copies) TEST(Snappy, MaxBlowup) { - string input; - for (int i = 0; i < 20000; i++) { - ACMRandom rnd(i); - uint32 bytes = static_cast(rnd.Next()); - input.append(reinterpret_cast(&bytes), sizeof(bytes)); - } - for (int i = 19999; i >= 0; i--) { - ACMRandom rnd(i); - uint32 bytes = static_cast(rnd.Next()); - input.append(reinterpret_cast(&bytes), sizeof(bytes)); + std::mt19937 rng; + std::uniform_int_distribution uniform_byte(0, 255); + std::string input; + for (int i = 0; i < 80000; ++i) + input.push_back(static_cast(uniform_byte(rng))); + + for (int i = 0; i < 80000; i += 4) { + std::string four_bytes(input.end() - i - 4, input.end() - i); + input.append(four_bytes); } Verify(input); } TEST(Snappy, RandomData) { - ACMRandom rnd(FLAGS_test_random_seed); + std::minstd_rand0 rng(FLAGS_test_random_seed); + std::uniform_int_distribution uniform_0_to_3(0, 3); + std::uniform_int_distribution uniform_0_to_8(0, 8); + std::uniform_int_distribution uniform_byte(0, 255); + std::uniform_int_distribution uniform_4k(0, 4095); + std::uniform_int_distribution uniform_64k(0, 65535); + std::bernoulli_distribution one_in_ten(1.0 / 10); - const int num_ops = 20000; + constexpr int num_ops = 20000; for (int i = 0; i < num_ops; i++) { if ((i % 1000) == 0) { VLOG(0) << "Random op " << i << " of " << num_ops; } - string x; - size_t len = rnd.Uniform(4096); + std::string x; + size_t len = uniform_4k(rng); if (i < 100) { - len = 65536 + rnd.Uniform(65536); + len = 65536 + uniform_64k(rng); } while (x.size() < len) { int run_len = 1; - if (rnd.OneIn(10)) { - run_len = rnd.Skewed(8); + if (one_in_ten(rng)) { + int skewed_bits = uniform_0_to_8(rng); + // int is guaranteed to hold at least 16 bits, this uses at most 8 bits. 
+ std::uniform_int_distribution skewed_low(0, + (1 << skewed_bits) - 1); + run_len = skewed_low(rng); + } + char c = static_cast(uniform_byte(rng)); + if (i >= 100) { + int skewed_bits = uniform_0_to_3(rng); + // int is guaranteed to hold at least 16 bits, this uses at most 3 bits. + std::uniform_int_distribution skewed_low(0, + (1 << skewed_bits) - 1); + c = static_cast(skewed_low(rng)); } - char c = (i < 100) ? rnd.Uniform(256) : rnd.Skewed(3); while (run_len-- > 0 && x.size() < len) { - x += c; + x.push_back(c); } } @@ -829,19 +732,19 @@ TEST(Snappy, FourByteOffset) { // copy manually. // The two fragments that make up the input string. - string fragment1 = "012345689abcdefghijklmnopqrstuvwxyz"; - string fragment2 = "some other string"; + std::string fragment1 = "012345689abcdefghijklmnopqrstuvwxyz"; + std::string fragment2 = "some other string"; // How many times each fragment is emitted. const int n1 = 2; const int n2 = 100000 / fragment2.size(); const int length = n1 * fragment1.size() + n2 * fragment2.size(); - string compressed; + std::string compressed; Varint::Append32(&compressed, length); AppendLiteral(&compressed, fragment1); - string src = fragment1; + std::string src = fragment1; for (int i = 0; i < n2; i++) { AppendLiteral(&compressed, fragment2); src += fragment2; @@ -850,7 +753,7 @@ TEST(Snappy, FourByteOffset) { src += fragment1; CHECK_EQ(length, src.size()); - string uncompressed; + std::string uncompressed; CHECK(snappy::IsValidCompressedBuffer(compressed.data(), compressed.size())); CHECK(snappy::Uncompress(compressed.data(), compressed.size(), &uncompressed)); @@ -872,7 +775,7 @@ TEST(Snappy, IOVecEdgeCases) { iov[i].iov_len = kLengths[i]; } - string compressed; + std::string compressed; Varint::Append32(&compressed, 22); // A literal whose output crosses three blocks. 
@@ -933,7 +836,7 @@ TEST(Snappy, IOVecLiteralOverflow) { iov[i].iov_len = kLengths[i]; } - string compressed; + std::string compressed; Varint::Append32(&compressed, 8); AppendLiteral(&compressed, "12345678"); @@ -955,7 +858,7 @@ TEST(Snappy, IOVecCopyOverflow) { iov[i].iov_len = kLengths[i]; } - string compressed; + std::string compressed; Varint::Append32(&compressed, 8); AppendLiteral(&compressed, "123"); @@ -969,7 +872,7 @@ TEST(Snappy, IOVecCopyOverflow) { } } -static bool CheckUncompressedLength(const string& compressed, +static bool CheckUncompressedLength(const std::string& compressed, size_t* ulength) { const bool result1 = snappy::GetUncompressedLength(compressed.data(), compressed.size(), @@ -983,7 +886,7 @@ static bool CheckUncompressedLength(const string& compressed, } TEST(SnappyCorruption, TruncatedVarint) { - string compressed, uncompressed; + std::string compressed, uncompressed; size_t ulength; compressed.push_back('\xf0'); CHECK(!CheckUncompressedLength(compressed, &ulength)); @@ -993,7 +896,7 @@ TEST(SnappyCorruption, TruncatedVarint) { } TEST(SnappyCorruption, UnterminatedVarint) { - string compressed, uncompressed; + std::string compressed, uncompressed; size_t ulength; compressed.push_back('\x80'); compressed.push_back('\x80'); @@ -1008,7 +911,7 @@ TEST(SnappyCorruption, UnterminatedVarint) { } TEST(SnappyCorruption, OverflowingVarint) { - string compressed, uncompressed; + std::string compressed, uncompressed; size_t ulength; compressed.push_back('\xfb'); compressed.push_back('\xff'); @@ -1025,14 +928,14 @@ TEST(Snappy, ReadPastEndOfBuffer) { // Check that we do not read past end of input // Make a compressed string that ends with a single-byte literal - string compressed; + std::string compressed; Varint::Append32(&compressed, 1); AppendLiteral(&compressed, "x"); - string uncompressed; + std::string uncompressed; DataEndingAtUnreadablePage c(compressed); CHECK(snappy::Uncompress(c.data(), c.size(), &uncompressed)); - CHECK_EQ(uncompressed, 
string("x")); + CHECK_EQ(uncompressed, std::string("x")); } // Check for an infinite loop caused by a copy with offset==0 @@ -1054,7 +957,10 @@ TEST(Snappy, ZeroOffsetCopyValidation) { namespace { int TestFindMatchLength(const char* s1, const char *s2, unsigned length) { - return snappy::internal::FindMatchLength(s1, s2, s2 + length); + std::pair p = + snappy::internal::FindMatchLength(s1, s2, s2 + length); + CHECK_EQ(p.first < 8, p.second); + return p.first; } } // namespace @@ -1150,22 +1056,24 @@ TEST(Snappy, FindMatchLength) { } TEST(Snappy, FindMatchLengthRandom) { - const int kNumTrials = 10000; - const int kTypicalLength = 10; - ACMRandom rnd(FLAGS_test_random_seed); + constexpr int kNumTrials = 10000; + constexpr int kTypicalLength = 10; + std::minstd_rand0 rng(FLAGS_test_random_seed); + std::uniform_int_distribution uniform_byte(0, 255); + std::bernoulli_distribution one_in_two(1.0 / 2); + std::bernoulli_distribution one_in_typical_length(1.0 / kTypicalLength); for (int i = 0; i < kNumTrials; i++) { - string s, t; - char a = rnd.Rand8(); - char b = rnd.Rand8(); - while (!rnd.OneIn(kTypicalLength)) { - s.push_back(rnd.OneIn(2) ? a : b); - t.push_back(rnd.OneIn(2) ? a : b); + std::string s, t; + char a = static_cast(uniform_byte(rng)); + char b = static_cast(uniform_byte(rng)); + while (!one_in_typical_length(rng)) { + s.push_back(one_in_two(rng) ? a : b); + t.push_back(one_in_two(rng) ? 
a : b); } DataEndingAtUnreadablePage u(s); DataEndingAtUnreadablePage v(t); - int matched = snappy::internal::FindMatchLength( - u.data(), v.data(), v.data() + t.size()); + int matched = TestFindMatchLength(u.data(), v.data(), t.size()); if (matched == t.size()) { EXPECT_EQ(s, t); } else { @@ -1195,7 +1103,6 @@ TEST(Snappy, VerifyCharTable) { using snappy::internal::COPY_2_BYTE_OFFSET; using snappy::internal::COPY_4_BYTE_OFFSET; using snappy::internal::char_table; - using snappy::internal::wordmask; uint16 dst[256]; @@ -1272,49 +1179,46 @@ TEST(Snappy, VerifyCharTable) { } static void CompressFile(const char* fname) { - string fullinput; + std::string fullinput; CHECK_OK(file::GetContents(fname, &fullinput, file::Defaults())); - string compressed; + std::string compressed; Compress(fullinput.data(), fullinput.size(), SNAPPY, &compressed, false); - CHECK_OK(file::SetContents(string(fname).append(".comp"), compressed, + CHECK_OK(file::SetContents(std::string(fname).append(".comp"), compressed, file::Defaults())); } static void UncompressFile(const char* fname) { - string fullinput; + std::string fullinput; CHECK_OK(file::GetContents(fname, &fullinput, file::Defaults())); size_t uncompLength; CHECK(CheckUncompressedLength(fullinput, &uncompLength)); - string uncompressed; + std::string uncompressed; uncompressed.resize(uncompLength); CHECK(snappy::Uncompress(fullinput.data(), fullinput.size(), &uncompressed)); - CHECK_OK(file::SetContents(string(fname).append(".uncomp"), uncompressed, + CHECK_OK(file::SetContents(std::string(fname).append(".uncomp"), uncompressed, file::Defaults())); } static void MeasureFile(const char* fname) { - string fullinput; + std::string fullinput; CHECK_OK(file::GetContents(fname, &fullinput, file::Defaults())); printf("%-40s :\n", fname); int start_len = (FLAGS_start_len < 0) ? 
fullinput.size() : FLAGS_start_len; int end_len = fullinput.size(); if (FLAGS_end_len >= 0) { - end_len = min(fullinput.size(), FLAGS_end_len); + end_len = std::min(fullinput.size(), FLAGS_end_len); } for (int len = start_len; len <= end_len; len++) { const char* const input = fullinput.data(); int repeats = (FLAGS_bytes + len) / (len + 1); if (FLAGS_zlib) Measure(input, len, ZLIB, repeats, 1024<<10); if (FLAGS_lzo) Measure(input, len, LZO, repeats, 1024<<10); - if (FLAGS_liblzf) Measure(input, len, LIBLZF, repeats, 1024<<10); - if (FLAGS_quicklz) Measure(input, len, QUICKLZ, repeats, 1024<<10); - if (FLAGS_fastlz) Measure(input, len, FASTLZ, repeats, 1024<<10); if (FLAGS_snappy) Measure(input, len, SNAPPY, repeats, 4096<<10); // For block-size based measurements @@ -1354,10 +1258,10 @@ static void BM_UFlat(int iters, int arg) { // Pick file to process based on "arg" CHECK_GE(arg, 0); CHECK_LT(arg, ARRAYSIZE(files)); - string contents = ReadTestDataFile(files[arg].filename, - files[arg].size_limit); + std::string contents = + ReadTestDataFile(files[arg].filename, files[arg].size_limit); - string zcontents; + std::string zcontents; snappy::Compress(contents.data(), contents.size(), &zcontents); char* dst = new char[contents.size()]; @@ -1380,10 +1284,10 @@ static void BM_UValidate(int iters, int arg) { // Pick file to process based on "arg" CHECK_GE(arg, 0); CHECK_LT(arg, ARRAYSIZE(files)); - string contents = ReadTestDataFile(files[arg].filename, - files[arg].size_limit); + std::string contents = + ReadTestDataFile(files[arg].filename, files[arg].size_limit); - string zcontents; + std::string zcontents; snappy::Compress(contents.data(), contents.size(), &zcontents); SetBenchmarkBytesProcessed(static_cast(iters) * @@ -1403,10 +1307,10 @@ static void BM_UIOVec(int iters, int arg) { // Pick file to process based on "arg" CHECK_GE(arg, 0); CHECK_LT(arg, ARRAYSIZE(files)); - string contents = ReadTestDataFile(files[arg].filename, - files[arg].size_limit); + std::string 
contents = + ReadTestDataFile(files[arg].filename, files[arg].size_limit); - string zcontents; + std::string zcontents; snappy::Compress(contents.data(), contents.size(), &zcontents); // Uncompress into an iovec containing ten entries. @@ -1449,10 +1353,10 @@ static void BM_UFlatSink(int iters, int arg) { // Pick file to process based on "arg" CHECK_GE(arg, 0); CHECK_LT(arg, ARRAYSIZE(files)); - string contents = ReadTestDataFile(files[arg].filename, - files[arg].size_limit); + std::string contents = + ReadTestDataFile(files[arg].filename, files[arg].size_limit); - string zcontents; + std::string zcontents; snappy::Compress(contents.data(), contents.size(), &zcontents); char* dst = new char[contents.size()]; @@ -1467,7 +1371,7 @@ static void BM_UFlatSink(int iters, int arg) { } StopBenchmarkTiming(); - string s(dst, contents.size()); + std::string s(dst, contents.size()); CHECK_EQ(contents, s); delete[] dst; @@ -1481,8 +1385,8 @@ static void BM_ZFlat(int iters, int arg) { // Pick file to process based on "arg" CHECK_GE(arg, 0); CHECK_LT(arg, ARRAYSIZE(files)); - string contents = ReadTestDataFile(files[arg].filename, - files[arg].size_limit); + std::string contents = + ReadTestDataFile(files[arg].filename, files[arg].size_limit); char* dst = new char[snappy::MaxCompressedLength(contents.size())]; @@ -1497,16 +1401,89 @@ static void BM_ZFlat(int iters, int arg) { StopBenchmarkTiming(); const double compression_ratio = static_cast(zsize) / std::max(1, contents.size()); - SetBenchmarkLabel(StringPrintf("%s (%.2f %%)", - files[arg].label, 100.0 * compression_ratio)); - VLOG(0) << StringPrintf("compression for %s: %zd -> %zd bytes", - files[arg].label, contents.size(), zsize); + SetBenchmarkLabel(StrFormat("%s (%.2f %%)", files[arg].label, + 100.0 * compression_ratio)); + VLOG(0) << StrFormat("compression for %s: %zd -> %zd bytes", + files[arg].label, static_cast(contents.size()), + static_cast(zsize)); delete[] dst; } BENCHMARK(BM_ZFlat)->DenseRange(0, ARRAYSIZE(files) 
- 1); -} // namespace snappy +static void BM_ZFlatAll(int iters, int arg) { + StopBenchmarkTiming(); + CHECK_EQ(arg, 0); + const int num_files = ARRAYSIZE(files); + + std::vector contents(num_files); + std::vector dst(num_files); + + int64 total_contents_size = 0; + for (int i = 0; i < num_files; ++i) { + contents[i] = ReadTestDataFile(files[i].filename, files[i].size_limit); + dst[i] = new char[snappy::MaxCompressedLength(contents[i].size())]; + total_contents_size += contents[i].size(); + } + + SetBenchmarkBytesProcessed(static_cast(iters) * total_contents_size); + StartBenchmarkTiming(); + + size_t zsize = 0; + while (iters-- > 0) { + for (int i = 0; i < num_files; ++i) { + snappy::RawCompress(contents[i].data(), contents[i].size(), dst[i], + &zsize); + } + } + StopBenchmarkTiming(); + + for (int i = 0; i < num_files; ++i) { + delete[] dst[i]; + } + SetBenchmarkLabel(StrFormat("%d files", num_files)); +} +BENCHMARK(BM_ZFlatAll)->DenseRange(0, 0); + +static void BM_ZFlatIncreasingTableSize(int iters, int arg) { + StopBenchmarkTiming(); + + CHECK_EQ(arg, 0); + CHECK_GT(ARRAYSIZE(files), 0); + const std::string base_content = + ReadTestDataFile(files[0].filename, files[0].size_limit); + + std::vector contents; + std::vector dst; + int64 total_contents_size = 0; + for (int table_bits = kMinHashTableBits; table_bits <= kMaxHashTableBits; + ++table_bits) { + std::string content = base_content; + content.resize(1 << table_bits); + dst.push_back(new char[snappy::MaxCompressedLength(content.size())]); + total_contents_size += content.size(); + contents.push_back(std::move(content)); + } + + size_t zsize = 0; + SetBenchmarkBytesProcessed(static_cast(iters) * total_contents_size); + StartBenchmarkTiming(); + while (iters-- > 0) { + for (int i = 0; i < contents.size(); ++i) { + snappy::RawCompress(contents[i].data(), contents[i].size(), dst[i], + &zsize); + } + } + StopBenchmarkTiming(); + + for (int i = 0; i < dst.size(); ++i) { + delete[] dst[i]; + } + 
SetBenchmarkLabel(StrFormat("%zd tables", contents.size())); +} +BENCHMARK(BM_ZFlatIncreasingTableSize)->DenseRange(0, 0); + +} // namespace snappy int main(int argc, char** argv) { InitGoogle(argv[0], &argc, &argv, true); @@ -1515,11 +1492,11 @@ int main(int argc, char** argv) { if (argc >= 2) { for (int arg = 1; arg < argc; arg++) { if (FLAGS_write_compressed) { - CompressFile(argv[arg]); + snappy::CompressFile(argv[arg]); } else if (FLAGS_write_uncompressed) { - UncompressFile(argv[arg]); + snappy::UncompressFile(argv[arg]); } else { - MeasureFile(argv[arg]); + snappy::MeasureFile(argv[arg]); } } return 0;