зеркало из https://github.com/mozilla/gecko-dev.git
Bug 1680230 - Part 2: Vendor, r=janerik
Differential Revision: https://phabricator.services.mozilla.com/D98595
This commit is contained in:
Родитель
d93f852725
Коммит
173311ea52
|
@ -525,7 +525,7 @@ dependencies = [
|
|||
"nserror",
|
||||
"nsstring",
|
||||
"rental",
|
||||
"rkv 0.15.0",
|
||||
"rkv 0.16.0",
|
||||
"rust_cascade",
|
||||
"sha2",
|
||||
"storage_variant",
|
||||
|
@ -2654,7 +2654,7 @@ dependencies = [
|
|||
"moz_task",
|
||||
"nserror",
|
||||
"nsstring",
|
||||
"rkv 0.15.0",
|
||||
"rkv 0.16.0",
|
||||
"storage_variant",
|
||||
"tempfile",
|
||||
"thin-vec",
|
||||
|
@ -4287,6 +4287,29 @@ dependencies = [
|
|||
"uuid",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rkv"
|
||||
version = "0.16.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e7a845aee216ee2be8a02ea265d70563d1ed7bd852c04f709a8f11979079b2ac"
|
||||
dependencies = [
|
||||
"arrayref",
|
||||
"bincode",
|
||||
"bitflags",
|
||||
"byteorder",
|
||||
"failure",
|
||||
"id-arena",
|
||||
"lazy_static",
|
||||
"lmdb-rkv",
|
||||
"log",
|
||||
"ordered-float",
|
||||
"paste",
|
||||
"serde",
|
||||
"serde_derive",
|
||||
"url",
|
||||
"uuid",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rlbox_lucet_sandbox"
|
||||
version = "0.1.0"
|
||||
|
@ -6063,7 +6086,7 @@ dependencies = [
|
|||
"nserror",
|
||||
"nsstring",
|
||||
"once_cell",
|
||||
"rkv 0.15.0",
|
||||
"rkv 0.16.0",
|
||||
"serde_json",
|
||||
"tempfile",
|
||||
"xpcom",
|
||||
|
|
Различия файлов скрыты, потому что одна или несколько строк слишком длинны
|
@ -0,0 +1,15 @@
|
|||
# Community Participation Guidelines
|
||||
|
||||
This repository is governed by Mozilla's code of conduct and etiquette guidelines.
|
||||
For more details, please read the
|
||||
[Mozilla Community Participation Guidelines](https://www.mozilla.org/about/governance/policies/participation/).
|
||||
|
||||
## How to Report
|
||||
For more information on how to report violations of the Community Participation Guidelines, please read our '[How to Report](https://www.mozilla.org/about/governance/policies/participation/reporting/)' page.
|
||||
|
||||
<!--
|
||||
## Project Specific Etiquette
|
||||
|
||||
In some cases, there will be additional project etiquette i.e.: (https://bugzilla.mozilla.org/page.cgi?id=etiquette.html).
|
||||
Please update for your project.
|
||||
-->
|
|
@ -0,0 +1,492 @@
|
|||
# This file is automatically @generated by Cargo.
|
||||
# It is not intended for manual editing.
|
||||
[[package]]
|
||||
name = "addr2line"
|
||||
version = "0.13.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1b6a2d3371669ab3ca9797670853d61402b03d0b4b9ebf33d677dfa720203072"
|
||||
dependencies = [
|
||||
"gimli",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "adler"
|
||||
version = "0.2.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e"
|
||||
|
||||
[[package]]
|
||||
name = "arrayref"
|
||||
version = "0.3.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544"
|
||||
|
||||
[[package]]
|
||||
name = "autocfg"
|
||||
version = "1.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d"
|
||||
|
||||
[[package]]
|
||||
name = "backtrace"
|
||||
version = "0.3.50"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "46254cf2fdcdf1badb5934448c1bcbe046a56537b3987d96c51a7afc5d03f293"
|
||||
dependencies = [
|
||||
"addr2line",
|
||||
"cfg-if",
|
||||
"libc",
|
||||
"miniz_oxide",
|
||||
"object",
|
||||
"rustc-demangle",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "bincode"
|
||||
version = "1.3.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f30d3a39baa26f9651f17b375061f3233dde33424a8b72b0dbe93a68a0bc896d"
|
||||
dependencies = [
|
||||
"byteorder",
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "bitflags"
|
||||
version = "1.2.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
|
||||
|
||||
[[package]]
|
||||
name = "byteorder"
|
||||
version = "1.3.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de"
|
||||
|
||||
[[package]]
|
||||
name = "cc"
|
||||
version = "1.0.58"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f9a06fb2e53271d7c279ec1efea6ab691c35a2ae67ec0d91d7acec0caf13b518"
|
||||
|
||||
[[package]]
|
||||
name = "cfg-if"
|
||||
version = "0.1.10"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
|
||||
|
||||
[[package]]
|
||||
name = "failure"
|
||||
version = "0.1.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d32e9bd16cc02eae7db7ef620b392808b89f6a5e16bb3497d159c6b92a0f4f86"
|
||||
dependencies = [
|
||||
"backtrace",
|
||||
"failure_derive",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "failure_derive"
|
||||
version = "0.1.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "aa4da3c766cd7a0db8242e326e9e4e081edd567072893ed320008189715366a4"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
"synstructure",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "getrandom"
|
||||
version = "0.1.14"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"libc",
|
||||
"wasi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "gimli"
|
||||
version = "0.22.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "aaf91faf136cb47367fa430cd46e37a788775e7fa104f8b4bcb3861dc389b724"
|
||||
|
||||
[[package]]
|
||||
name = "id-arena"
|
||||
version = "2.2.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "25a2bc672d1148e28034f176e01fffebb08b35768468cc954630da77a1449005"
|
||||
|
||||
[[package]]
|
||||
name = "idna"
|
||||
version = "0.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "02e2673c30ee86b5b96a9cb52ad15718aa1f966f5ab9ad54a8b95d5ca33120a9"
|
||||
dependencies = [
|
||||
"matches",
|
||||
"unicode-bidi",
|
||||
"unicode-normalization",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "lazy_static"
|
||||
version = "1.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
|
||||
|
||||
[[package]]
|
||||
name = "libc"
|
||||
version = "0.2.72"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a9f8082297d534141b30c8d39e9b1773713ab50fdbe4ff30f750d063b3bfd701"
|
||||
|
||||
[[package]]
|
||||
name = "lmdb-rkv"
|
||||
version = "0.14.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "447a296f7aca299cfbb50f4e4f3d49451549af655fb7215d7f8c0c3d64bad42b"
|
||||
dependencies = [
|
||||
"bitflags",
|
||||
"byteorder",
|
||||
"libc",
|
||||
"lmdb-rkv-sys",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "lmdb-rkv-sys"
|
||||
version = "0.11.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b27470ac25167b3afdfb6af8fcd3bc1be67de50ffbdaf4073378cfded6ae24a5"
|
||||
dependencies = [
|
||||
"cc",
|
||||
"libc",
|
||||
"pkg-config",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "log"
|
||||
version = "0.4.11"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "matches"
|
||||
version = "0.1.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08"
|
||||
|
||||
[[package]]
|
||||
name = "miniz_oxide"
|
||||
version = "0.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "be0f75932c1f6cfae3c04000e40114adf955636e19040f9c0a2c380702aa1c7f"
|
||||
dependencies = [
|
||||
"adler",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "num-traits"
|
||||
version = "0.2.12"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ac267bcc07f48ee5f8935ab0d24f316fb722d7a1292e2913f0cc196b29ffd611"
|
||||
dependencies = [
|
||||
"autocfg",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "object"
|
||||
version = "0.20.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1ab52be62400ca80aa00285d25253d7f7c437b7375c4de678f5405d3afe82ca5"
|
||||
|
||||
[[package]]
|
||||
name = "ordered-float"
|
||||
version = "1.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3741934be594d77de1c8461ebcbbe866f585ea616a9753aa78f2bdc69f0e4579"
|
||||
dependencies = [
|
||||
"num-traits",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "paste"
|
||||
version = "0.1.18"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880"
|
||||
dependencies = [
|
||||
"paste-impl",
|
||||
"proc-macro-hack",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "paste-impl"
|
||||
version = "0.1.18"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6"
|
||||
dependencies = [
|
||||
"proc-macro-hack",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "percent-encoding"
|
||||
version = "2.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e"
|
||||
|
||||
[[package]]
|
||||
name = "pkg-config"
|
||||
version = "0.3.18"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d36492546b6af1463394d46f0c834346f31548646f6ba10849802c9c9a27ac33"
|
||||
|
||||
[[package]]
|
||||
name = "ppv-lite86"
|
||||
version = "0.2.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "237a5ed80e274dbc66f86bd59c1e25edc039660be53194b5fe0a482e0f2612ea"
|
||||
|
||||
[[package]]
|
||||
name = "proc-macro-hack"
|
||||
version = "0.5.16"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7e0456befd48169b9f13ef0f0ad46d492cf9d2dbb918bcf38e01eed4ce3ec5e4"
|
||||
|
||||
[[package]]
|
||||
name = "proc-macro2"
|
||||
version = "1.0.18"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "beae6331a816b1f65d04c45b078fd8e6c93e8071771f41b8163255bbd8d7c8fa"
|
||||
dependencies = [
|
||||
"unicode-xid",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "quote"
|
||||
version = "1.0.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rand"
|
||||
version = "0.7.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03"
|
||||
dependencies = [
|
||||
"getrandom",
|
||||
"libc",
|
||||
"rand_chacha",
|
||||
"rand_core",
|
||||
"rand_hc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rand_chacha"
|
||||
version = "0.2.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402"
|
||||
dependencies = [
|
||||
"ppv-lite86",
|
||||
"rand_core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rand_core"
|
||||
version = "0.5.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19"
|
||||
dependencies = [
|
||||
"getrandom",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rand_hc"
|
||||
version = "0.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c"
|
||||
dependencies = [
|
||||
"rand_core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "redox_syscall"
|
||||
version = "0.1.57"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce"
|
||||
|
||||
[[package]]
|
||||
name = "remove_dir_all"
|
||||
version = "0.5.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7"
|
||||
dependencies = [
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rkv"
|
||||
version = "0.15.0"
|
||||
dependencies = [
|
||||
"arrayref",
|
||||
"bincode",
|
||||
"bitflags",
|
||||
"byteorder",
|
||||
"failure",
|
||||
"id-arena",
|
||||
"lazy_static",
|
||||
"lmdb-rkv",
|
||||
"log",
|
||||
"ordered-float",
|
||||
"paste",
|
||||
"serde",
|
||||
"serde_derive",
|
||||
"tempfile",
|
||||
"url",
|
||||
"uuid",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rustc-demangle"
|
||||
version = "0.1.16"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4c691c0e608126e00913e33f0ccf3727d5fc84573623b8d65b2df340b5201783"
|
||||
|
||||
[[package]]
|
||||
name = "serde"
|
||||
version = "1.0.114"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5317f7588f0a5078ee60ef675ef96735a1442132dc645eb1d12c018620ed8cd3"
|
||||
dependencies = [
|
||||
"serde_derive",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "serde_derive"
|
||||
version = "1.0.114"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2a0be94b04690fbaed37cddffc5c134bf537c8e3329d53e982fe04c374978f8e"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "syn"
|
||||
version = "1.0.34"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "936cae2873c940d92e697597c5eee105fb570cd5689c695806f672883653349b"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"unicode-xid",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "synstructure"
|
||||
version = "0.12.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
"unicode-xid",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tempfile"
|
||||
version = "3.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"libc",
|
||||
"rand",
|
||||
"redox_syscall",
|
||||
"remove_dir_all",
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tinyvec"
|
||||
version = "0.3.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "53953d2d3a5ad81d9f844a32f14ebb121f50b650cd59d0ee2a07cf13c617efed"
|
||||
|
||||
[[package]]
|
||||
name = "unicode-bidi"
|
||||
version = "0.3.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5"
|
||||
dependencies = [
|
||||
"matches",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "unicode-normalization"
|
||||
version = "0.1.13"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6fb19cf769fa8c6a80a162df694621ebeb4dafb606470b2b2fce0be40a98a977"
|
||||
dependencies = [
|
||||
"tinyvec",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "unicode-xid"
|
||||
version = "0.2.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564"
|
||||
|
||||
[[package]]
|
||||
name = "url"
|
||||
version = "2.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "829d4a8476c35c9bf0bbce5a3b23f4106f79728039b726d292bb93bc106787cb"
|
||||
dependencies = [
|
||||
"idna",
|
||||
"matches",
|
||||
"percent-encoding",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "uuid"
|
||||
version = "0.8.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9fde2f6a4bea1d6e007c4ad38c6839fa71cbb63b6dbf5b595aa38dc9b1093c11"
|
||||
|
||||
[[package]]
|
||||
name = "wasi"
|
||||
version = "0.9.0+wasi-snapshot-preview1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519"
|
||||
|
||||
[[package]]
|
||||
name = "winapi"
|
||||
version = "0.3.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
|
||||
dependencies = [
|
||||
"winapi-i686-pc-windows-gnu",
|
||||
"winapi-x86_64-pc-windows-gnu",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "winapi-i686-pc-windows-gnu"
|
||||
version = "0.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
|
||||
|
||||
[[package]]
|
||||
name = "winapi-x86_64-pc-windows-gnu"
|
||||
version = "0.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
|
|
@ -0,0 +1,88 @@
|
|||
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
|
||||
#
|
||||
# When uploading crates to the registry Cargo will automatically
|
||||
# "normalize" Cargo.toml files for maximal compatibility
|
||||
# with all versions of Cargo and also rewrite `path` dependencies
|
||||
# to registry (e.g., crates.io) dependencies
|
||||
#
|
||||
# If you believe there's an error in this file please file an
|
||||
# issue against the rust-lang/cargo repository. If you're
|
||||
# editing this file be aware that the upstream Cargo.toml
|
||||
# will likely look very different (and much more reasonable)
|
||||
|
||||
[package]
|
||||
edition = "2018"
|
||||
name = "rkv"
|
||||
version = "0.15.0"
|
||||
authors = ["Richard Newman <rnewman@twinql.com>", "Nan Jiang <najiang@mozilla.com>", "Myk Melez <myk@mykzilla.org>", "Victor Porof <vporof@mozilla.com>"]
|
||||
exclude = ["/tests/envs/*"]
|
||||
description = "A simple, humane, typed key-value storage solution"
|
||||
homepage = "https://github.com/mozilla/rkv"
|
||||
documentation = "https://docs.rs/rkv"
|
||||
readme = "README.md"
|
||||
keywords = ["lmdb", "database", "storage"]
|
||||
categories = ["database"]
|
||||
license = "Apache-2.0"
|
||||
repository = "https://github.com/mozilla/rkv"
|
||||
[dependencies.arrayref]
|
||||
version = "0.3"
|
||||
|
||||
[dependencies.bincode]
|
||||
version = "1.0"
|
||||
|
||||
[dependencies.bitflags]
|
||||
version = "1"
|
||||
|
||||
[dependencies.byteorder]
|
||||
version = "1"
|
||||
|
||||
[dependencies.failure]
|
||||
version = "0.1"
|
||||
features = ["derive"]
|
||||
default_features = false
|
||||
|
||||
[dependencies.id-arena]
|
||||
version = "2.2"
|
||||
|
||||
[dependencies.lazy_static]
|
||||
version = "1.0"
|
||||
|
||||
[dependencies.lmdb-rkv]
|
||||
version = "0.14"
|
||||
|
||||
[dependencies.log]
|
||||
version = "0.4"
|
||||
|
||||
[dependencies.ordered-float]
|
||||
version = "1.0"
|
||||
|
||||
[dependencies.paste]
|
||||
version = "0.1"
|
||||
|
||||
[dependencies.serde]
|
||||
version = "1.0"
|
||||
features = ["derive", "rc"]
|
||||
|
||||
[dependencies.serde_derive]
|
||||
version = "1.0"
|
||||
|
||||
[dependencies.url]
|
||||
version = "2.0"
|
||||
|
||||
[dependencies.uuid]
|
||||
version = "0.8"
|
||||
[dev-dependencies.byteorder]
|
||||
version = "1"
|
||||
|
||||
[dev-dependencies.tempfile]
|
||||
version = "3"
|
||||
|
||||
[features]
|
||||
backtrace = ["failure/backtrace", "failure/std"]
|
||||
db-dup-sort = []
|
||||
db-int-key = []
|
||||
default = ["db-dup-sort", "db-int-key"]
|
||||
no-canonicalize-path = []
|
||||
with-asan = ["lmdb-rkv/with-asan"]
|
||||
with-fuzzer = ["lmdb-rkv/with-fuzzer"]
|
||||
with-fuzzer-no-link = ["lmdb-rkv/with-fuzzer-no-link"]
|
|
@ -0,0 +1,202 @@
|
|||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
|
@ -0,0 +1,80 @@
|
|||
# rkv
|
||||
|
||||
[![Travis CI Build Status](https://travis-ci.org/mozilla/rkv.svg?branch=master)](https://travis-ci.org/mozilla/rkv)
|
||||
[![Appveyor Build Status](https://ci.appveyor.com/api/projects/status/lk936u5y5bi6qafb/branch/master?svg=true)](https://ci.appveyor.com/project/mykmelez/rkv/branch/master)
|
||||
[![Documentation](https://docs.rs/rkv/badge.svg)](https://docs.rs/rkv/)
|
||||
[![Crate](https://img.shields.io/crates/v/rkv.svg)](https://crates.io/crates/rkv)
|
||||
|
||||
The [rkv Rust crate](https://crates.io/crates/rkv) is a simple, humane, typed key-value storage solution. It supports multiple backend engines with varying guarantees, such as [LMDB](http://www.lmdb.tech/doc/) for performance, or "SafeMode" for reliability.
|
||||
|
||||
## ⚠️ Warning ⚠️
|
||||
|
||||
To use rkv in production/release environments at Mozilla, you may do so with the "SafeMode" backend, for example:
|
||||
|
||||
```rust
|
||||
use rkv::{Manager, Rkv};
|
||||
use rkv::backend::{SafeMode, SafeModeEnvironment};
|
||||
|
||||
let mut manager = Manager::<SafeModeEnvironment>::singleton().write().unwrap();
|
||||
let shared_rkv = manager.get_or_create(path, Rkv::new::<SafeMode>).unwrap();
|
||||
|
||||
...
|
||||
```
|
||||
|
||||
The "SafeMode" backend performs well, with two caveats: the entire database is stored in memory, and write transactions are synchronously written to disk (only on commit).
|
||||
|
||||
In the future, it will be advisable to switch to a different backend with better performance guarantees. We're working on either fixing some LMDB crashes, or offering more choices of backend engines (e.g. SQLite).
|
||||
|
||||
## Use
|
||||
|
||||
Comprehensive information about using rkv is available in its [online documentation](https://docs.rs/rkv/), which can also be generated for local consumption:
|
||||
|
||||
```sh
|
||||
cargo doc --open
|
||||
```
|
||||
|
||||
## Build
|
||||
|
||||
Build this project as you would build other Rust crates:
|
||||
|
||||
```sh
|
||||
cargo build
|
||||
```
|
||||
|
||||
### Features
|
||||
|
||||
There are several features that you can opt-in and out of when using rkv:
|
||||
|
||||
By default, `db-dup-sort` and `db-int-key` features offer high level database APIs which allow multiple values per key, and optimizations around integer-based keys respectively. Opt out of these default features when specifying the rkv dependency in your Cargo.toml file to disable them; doing so avoids a certain amount of overhead required to support them.
|
||||
|
||||
If you specify the `backtrace` feature, backtraces will be enabled in "failure" errors. This feature is disabled by default.
|
||||
|
||||
To aid fuzzing efforts, `with-asan`, `with-fuzzer`, and `with-fuzzer-no-link` configure the build scripts responsible for compiling the underlying backing engines (e.g. LMDB) to build with these LLVM features enabled. Please refer to the official LLVM/Clang documentation on them for more information. These features are also disabled by default.
|
||||
|
||||
## Test
|
||||
|
||||
Test this project as you would test other Rust crates:
|
||||
|
||||
```sh
|
||||
cargo test
|
||||
```
|
||||
|
||||
The project includes unit and doc tests embedded in the `src/` files, integration tests in the `tests/` subdirectory, and usage examples in the `examples/` subdirectory. To ensure your changes don't break examples, also run them via the run-all-examples.sh shell script:
|
||||
|
||||
```sh
|
||||
./run-all-examples.sh
|
||||
```
|
||||
|
||||
Note: the test fixtures in the `tests/envs/` subdirectory aren't included in the package published to crates.io, so you must clone this repository in order to run the tests that depend on those fixtures or use the `rand` and `dump` executables to recreate them.
|
||||
|
||||
## Contribute
|
||||
|
||||
Of the various open source archetypes described in [A Framework for Purposeful Open Source](https://medium.com/mozilla-open-innovation/whats-your-open-source-strategy-here-are-10-answers-383221b3f9d3), the rkv project most closely resembles the Specialty Library, and we welcome contributions. Please report problems or ask questions using this repo's GitHub [issue tracker](https://github.com/mozilla/rkv/issues) and submit [pull requests](https://github.com/mozilla/rkv/pulls) for code and documentation changes.
|
||||
|
||||
rkv relies on the latest [rustfmt](https://github.com/rust-lang-nursery/rustfmt) for code formatting, so please make sure your pull request passes the rustfmt before submitting it for review. See rustfmt's [quick start](https://github.com/rust-lang-nursery/rustfmt#quick-start) for installation details.
|
||||
|
||||
We follow Mozilla's [Community Participation Guidelines](https://www.mozilla.org/en-US/about/governance/policies/participation/) while contributing to this project.
|
||||
|
||||
## License
|
||||
|
||||
The rkv source code is licensed under the Apache License, Version 2.0, as described in the [LICENSE](https://github.com/mozilla/rkv/blob/master/LICENSE) file.
|
|
@ -0,0 +1,11 @@
|
|||
## Examples of how to use rkv
|
||||
|
||||
All examples can be executed with:
|
||||
|
||||
```
|
||||
cargo run --example $name
|
||||
```
|
||||
|
||||
* [`simple-store`](simple-store.rs) - a simple key/value store that showcases the basic usage of rkv.
|
||||
|
||||
* [`iterator`](iterator.rs) - a demo that showcases the basic usage of iterators in rkv.
|
|
@ -0,0 +1,84 @@
|
|||
// Any copyright is dedicated to the Public Domain.
|
||||
// http://creativecommons.org/publicdomain/zero/1.0/
|
||||
|
||||
//! A demo that showcases the basic usage of iterators in rkv.
|
||||
//!
|
||||
//! You can test this out by running:
|
||||
//!
|
||||
//! cargo run --example iterator
|
||||
|
||||
use std::{
|
||||
fs,
|
||||
str,
|
||||
};
|
||||
|
||||
use tempfile::Builder;
|
||||
|
||||
use rkv::{
|
||||
backend::{
|
||||
Lmdb,
|
||||
LmdbDatabase,
|
||||
LmdbEnvironment,
|
||||
},
|
||||
Manager,
|
||||
Rkv,
|
||||
SingleStore,
|
||||
StoreError,
|
||||
StoreOptions,
|
||||
Value,
|
||||
};
|
||||
|
||||
fn main() {
|
||||
let root = Builder::new().prefix("iterator").tempdir().unwrap();
|
||||
fs::create_dir_all(root.path()).unwrap();
|
||||
let p = root.path();
|
||||
|
||||
let mut manager = Manager::<LmdbEnvironment>::singleton().write().unwrap();
|
||||
let created_arc = manager.get_or_create(p, Rkv::new::<Lmdb>).unwrap();
|
||||
let k = created_arc.read().unwrap();
|
||||
let store = k.open_single("store", StoreOptions::create()).unwrap();
|
||||
|
||||
populate_store(&k, store).unwrap();
|
||||
|
||||
let reader = k.read().unwrap();
|
||||
|
||||
println!("Iterating from the beginning...");
|
||||
// Reader::iter_start() iterates from the first item in the store, and
|
||||
// returns the (key, value) tuples in order.
|
||||
let mut iter = store.iter_start(&reader).unwrap();
|
||||
while let Some(Ok((country, city))) = iter.next() {
|
||||
println!("{}, {:?}", str::from_utf8(country).unwrap(), city);
|
||||
}
|
||||
|
||||
println!();
|
||||
println!("Iterating from the given key...");
|
||||
// Reader::iter_from() iterates from the first key equal to or greater
|
||||
// than the given key.
|
||||
let mut iter = store.iter_from(&reader, "Japan").unwrap();
|
||||
while let Some(Ok((country, city))) = iter.next() {
|
||||
println!("{}, {:?}", str::from_utf8(country).unwrap(), city);
|
||||
}
|
||||
|
||||
println!();
|
||||
println!("Iterating from the given prefix...");
|
||||
let mut iter = store.iter_from(&reader, "Un").unwrap();
|
||||
while let Some(Ok((country, city))) = iter.next() {
|
||||
println!("{}, {:?}", str::from_utf8(country).unwrap(), city);
|
||||
}
|
||||
}
|
||||
|
||||
fn populate_store(k: &Rkv<LmdbEnvironment>, store: SingleStore<LmdbDatabase>) -> Result<(), StoreError> {
|
||||
let mut writer = k.write()?;
|
||||
for (country, city) in vec![
|
||||
("Canada", Value::Str("Ottawa")),
|
||||
("United States of America", Value::Str("Washington")),
|
||||
("Germany", Value::Str("Berlin")),
|
||||
("France", Value::Str("Paris")),
|
||||
("Italy", Value::Str("Rome")),
|
||||
("United Kingdom", Value::Str("London")),
|
||||
("Japan", Value::Str("Tokyo")),
|
||||
] {
|
||||
store.put(&mut writer, country, &city)?;
|
||||
}
|
||||
writer.commit()
|
||||
}
|
|
@ -0,0 +1,194 @@
|
|||
// Any copyright is dedicated to the Public Domain.
|
||||
// http://creativecommons.org/publicdomain/zero/1.0/
|
||||
|
||||
//! A simple rkv demo that showcases the basic usage (put/get/delete) of rkv.
|
||||
//!
|
||||
//! You can test this out by running:
|
||||
//!
|
||||
//! cargo run --example simple-store
|
||||
|
||||
use std::fs;
|
||||
|
||||
use tempfile::Builder;
|
||||
|
||||
use rkv::{
|
||||
backend::{
|
||||
BackendStat,
|
||||
Lmdb,
|
||||
LmdbDatabase,
|
||||
LmdbEnvironment,
|
||||
LmdbRwTransaction,
|
||||
},
|
||||
Manager,
|
||||
Rkv,
|
||||
StoreOptions,
|
||||
Value,
|
||||
};
|
||||
|
||||
// Shorthand aliases specializing rkv's generic store/writer types to the
// LMDB backend used throughout this example.
type MultiStore = rkv::MultiStore<LmdbDatabase>;
type Writer<'w> = rkv::Writer<LmdbRwTransaction<'w>>;
|
||||
|
||||
// Reads every value stored under a fixed set of keys, collects the string
// payloads into `ids`, then writes a blob entry back under each collected id.
// Panics if any stored value is not a `Value::Str`.
//
// NOTE(review): reads go through the writer, presumably so values written in
// this (uncommitted) transaction are visible — confirm against the rkv docs.
fn getput<'w, 's>(store: MultiStore, writer: &'w mut Writer, ids: &'s mut Vec<String>) {
    let keys = vec!["str1", "str2", "str3"];
    for k in keys.iter() {
        // this is a multi-valued database, so get returns an iterator
        let mut iter = store.get(writer, k).unwrap();
        // Iteration stops at the first error as well as at exhaustion.
        while let Some(Ok((_key, val))) = iter.next() {
            if let Value::Str(s) = val {
                ids.push(s.to_owned());
            } else {
                panic!("didn't get a string back!");
            }
        }
    }
    // Write a blob under each string value collected above.
    for id in ids {
        store.put(writer, &id, &Value::Blob(b"weeeeeee")).unwrap();
    }
}
|
||||
|
||||
fn delete(store: MultiStore, writer: &mut Writer) {
|
||||
let keys = vec!["str1", "str2", "str3"];
|
||||
let vals = vec!["string uno", "string quatro", "string siete"];
|
||||
// we convert the writer into a cursor so that we can safely read
|
||||
for i in 0..keys.len() {
|
||||
store.delete(writer, &keys[i], &Value::Str(vals[i])).unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
/// Walks through the basic rkv operations — put/get/delete, multi-valued
/// stores, abort/commit, clearing, and multiple stores sharing one
/// environment — printing the result of each step.
fn main() {
    let root = Builder::new().prefix("simple-db").tempdir().unwrap();
    fs::create_dir_all(root.path()).unwrap();
    let p = root.path();

    // The manager enforces that each process opens the same lmdb environment at most once
    let mut manager = Manager::<LmdbEnvironment>::singleton().write().unwrap();
    let created_arc = manager.get_or_create(p, Rkv::new::<Lmdb>).unwrap();
    let k = created_arc.read().unwrap();

    // Creates a store called "store", plus a multi-valued store.
    let store = k.open_single("store", StoreOptions::create()).unwrap();
    let multistore = k.open_multi("multistore", StoreOptions::create()).unwrap();

    println!("Inserting data...");
    {
        // Use a writer to mutate the store; one value of each supported type.
        let mut writer = k.write().unwrap();
        store.put(&mut writer, "int", &Value::I64(1234)).unwrap();
        store.put(&mut writer, "uint", &Value::U64(1234_u64)).unwrap();
        store.put(&mut writer, "float", &Value::F64(1234.0.into())).unwrap();
        store.put(&mut writer, "instant", &Value::Instant(1_528_318_073_700)).unwrap();
        store.put(&mut writer, "boolean", &Value::Bool(true)).unwrap();
        store.put(&mut writer, "string", &Value::Str("héllo, yöu")).unwrap();
        store.put(&mut writer, "json", &Value::Json(r#"{"foo":"bar", "number": 1}"#)).unwrap();
        store.put(&mut writer, "blob", &Value::Blob(b"blob")).unwrap();
        writer.commit().unwrap();
    }

    println!("Testing getput");
    {
        // Multi-valued stores accept several values per key; getput() then
        // reads them back (within the same transaction) and delete() removes
        // a specific (key, value) pair per key.
        let mut ids = Vec::new();
        let mut writer = k.write().unwrap();
        multistore.put(&mut writer, "str1", &Value::Str("string uno")).unwrap();
        multistore.put(&mut writer, "str1", &Value::Str("string dos")).unwrap();
        multistore.put(&mut writer, "str1", &Value::Str("string tres")).unwrap();
        multistore.put(&mut writer, "str2", &Value::Str("string quatro")).unwrap();
        multistore.put(&mut writer, "str2", &Value::Str("string cinco")).unwrap();
        multistore.put(&mut writer, "str2", &Value::Str("string seis")).unwrap();
        multistore.put(&mut writer, "str3", &Value::Str("string siete")).unwrap();
        multistore.put(&mut writer, "str3", &Value::Str("string ocho")).unwrap();
        multistore.put(&mut writer, "str3", &Value::Str("string nueve")).unwrap();
        getput(multistore, &mut writer, &mut ids);
        writer.commit().unwrap();
        let mut writer = k.write().unwrap();
        delete(multistore, &mut writer);
        writer.commit().unwrap();
    }

    println!("Looking up keys...");
    {
        // Use a reader to query the store
        let reader = k.read().unwrap();
        println!("Get int {:?}", store.get(&reader, "int").unwrap());
        println!("Get uint {:?}", store.get(&reader, "uint").unwrap());
        println!("Get float {:?}", store.get(&reader, "float").unwrap());
        println!("Get instant {:?}", store.get(&reader, "instant").unwrap());
        println!("Get boolean {:?}", store.get(&reader, "boolean").unwrap());
        println!("Get string {:?}", store.get(&reader, "string").unwrap());
        println!("Get json {:?}", store.get(&reader, "json").unwrap());
        println!("Get blob {:?}", store.get(&reader, "blob").unwrap());
        println!("Get non-existent {:?}", store.get(&reader, "non-existent").unwrap());
    }

    println!("Looking up keys via Writer.get()...");
    {
        // A writer can also read, and sees its own uncommitted changes.
        let mut writer = k.write().unwrap();
        store.put(&mut writer, "foo", &Value::Str("bar")).unwrap();
        store.put(&mut writer, "bar", &Value::Str("baz")).unwrap();
        store.delete(&mut writer, "foo").unwrap();
        println!("It should be None! ({:?})", store.get(&writer, "foo").unwrap());
        println!("Get bar ({:?})", store.get(&writer, "bar").unwrap());
        writer.commit().unwrap();
        let reader = k.read().expect("reader");
        println!("It should be None! ({:?})", store.get(&reader, "foo").unwrap());
        println!("Get bar {:?}", store.get(&reader, "bar").unwrap());
    }

    println!("Aborting transaction...");
    {
        // Aborting a write transaction rolls back the change(s)
        let mut writer = k.write().unwrap();
        store.put(&mut writer, "foo", &Value::Str("bar")).unwrap();
        writer.abort();

        let reader = k.read().expect("reader");
        println!("It should be None! ({:?})", store.get(&reader, "foo").unwrap());
        // Explicitly aborting a transaction is not required unless an early
        // abort is desired, since both read and write transactions will
        // implicitly be aborted once they go out of scope.
    }

    println!("Deleting keys...");
    {
        // Deleting a key/value also requires a write transaction
        let mut writer = k.write().unwrap();
        store.put(&mut writer, "foo", &Value::Str("bar")).unwrap();
        store.delete(&mut writer, "foo").unwrap();
        println!("It should be None! ({:?})", store.get(&writer, "foo").unwrap());
        writer.commit().unwrap();

        // Committing a transaction consumes the writer, preventing you
        // from reusing it by failing and reporting a compile-time error.
        // This line would report error[E0382]: use of moved value: `writer`.
        // store.put(&mut writer, "baz", &Value::Str("buz")).unwrap();
    }

    println!("Clearing store...");
    {
        // Clearing a store deletes all the entries in that store
        let mut writer = k.write().unwrap();
        store.put(&mut writer, "foo", &Value::Str("bar")).unwrap();
        store.put(&mut writer, "bar", &Value::Str("baz")).unwrap();
        store.clear(&mut writer).unwrap();
        writer.commit().unwrap();

        let reader = k.read().expect("reader");
        println!("It should be None! ({:?})", store.get(&reader, "foo").unwrap());
        println!("It should be None! ({:?})", store.get(&reader, "bar").unwrap());
    }

    println!("Write and read on multiple stores...");
    {
        // A single write transaction can span multiple stores in the same
        // environment.
        let another_store = k.open_single("another_store", StoreOptions::create()).unwrap();
        let mut writer = k.write().unwrap();
        store.put(&mut writer, "foo", &Value::Str("bar")).unwrap();
        another_store.put(&mut writer, "foo", &Value::Str("baz")).unwrap();
        writer.commit().unwrap();

        let reader = k.read().unwrap();
        println!("Get from store value: {:?}", store.get(&reader, "foo").unwrap());
        println!("Get from another store value: {:?}", another_store.get(&reader, "foo").unwrap());
    }

    println!("Environment statistics: btree depth = {}", k.stat().unwrap().depth());
}
|
|
@ -0,0 +1,14 @@
|
|||
#!/bin/bash
# Builds every example, then runs each .rs example in examples/ one by one.
# Exits on the first failure (set -e), so a broken example fails the script.

set -e

cargo build --examples

for file in examples/*; do
    # Quote expansions so paths containing spaces or glob characters don't
    # undergo word splitting / re-globbing.
    filename=$(basename "${file}")
    extension=${filename##*.}
    example_name=${filename%.*}
    # Only run Rust sources; skip any other files in examples/.
    if [[ "${extension}" = "rs" ]]; then
        cargo run --example "${example_name}"
    fi
done
|
|
@ -0,0 +1,54 @@
|
|||
// Copyright 2018-2019 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
// Submodules: shared flag/trait definitions plus one implementation module
// per backend.
mod common;
mod impl_lmdb;
mod impl_safe;
mod traits;

pub use common::*;
pub use traits::*;

// Re-export the LMDB backend's concrete types under `Lmdb`-prefixed names so
// callers can name a specific backend without reaching into its module.
pub use impl_lmdb::{
    ArchMigrateError as LmdbArchMigrateError,
    ArchMigrateResult as LmdbArchMigrateResult,
    ArchMigrator as LmdbArchMigrator,
    DatabaseFlagsImpl as LmdbDatabaseFlags,
    DatabaseImpl as LmdbDatabase,
    EnvironmentBuilderImpl as Lmdb,
    EnvironmentFlagsImpl as LmdbEnvironmentFlags,
    EnvironmentImpl as LmdbEnvironment,
    ErrorImpl as LmdbError,
    InfoImpl as LmdbInfo,
    IterImpl as LmdbIter,
    RoCursorImpl as LmdbRoCursor,
    RoTransactionImpl as LmdbRoTransaction,
    RwCursorImpl as LmdbRwCursor,
    RwTransactionImpl as LmdbRwTransaction,
    StatImpl as LmdbStat,
    WriteFlagsImpl as LmdbWriteFlags,
};

// Same renaming scheme for the SafeMode backend (`SafeMode` prefixes).
pub use impl_safe::{
    DatabaseFlagsImpl as SafeModeDatabaseFlags,
    DatabaseImpl as SafeModeDatabase,
    EnvironmentBuilderImpl as SafeMode,
    EnvironmentFlagsImpl as SafeModeEnvironmentFlags,
    EnvironmentImpl as SafeModeEnvironment,
    ErrorImpl as SafeModeError,
    InfoImpl as SafeModeInfo,
    IterImpl as SafeModeIter,
    RoCursorImpl as SafeModeRoCursor,
    RoTransactionImpl as SafeModeRoTransaction,
    RwCursorImpl as SafeModeRwCursor,
    RwTransactionImpl as SafeModeRwTransaction,
    StatImpl as SafeModeStat,
    WriteFlagsImpl as SafeModeWriteFlags,
};
|
|
@ -0,0 +1,44 @@
|
|||
// Copyright 2018-2019 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
#![allow(non_camel_case_types)]
|
||||
|
||||
// Backend-agnostic flag names. The variant names mirror LMDB's flag set;
// each backend maps these onto its own native representation — see the
// `impl_lmdb` / `impl_safe` modules for the actual mappings.

// Flags controlling how an environment is opened and synced.
pub enum EnvironmentFlags {
    FIXED_MAP,
    NO_SUB_DIR,
    WRITE_MAP,
    READ_ONLY,
    NO_META_SYNC,
    NO_SYNC,
    MAP_ASYNC,
    NO_TLS,
    NO_LOCK,
    NO_READAHEAD,
    NO_MEM_INIT,
}

// Flags controlling key ordering and multi-value behavior of a database.
// The dup/int variants only exist when the corresponding cargo feature is
// enabled.
pub enum DatabaseFlags {
    REVERSE_KEY,
    #[cfg(feature = "db-dup-sort")]
    DUP_SORT,
    #[cfg(feature = "db-dup-sort")]
    DUP_FIXED,
    #[cfg(feature = "db-int-key")]
    INTEGER_KEY,
    INTEGER_DUP,
    REVERSE_DUP,
}

// Flags modifying individual write operations.
pub enum WriteFlags {
    NO_OVERWRITE,
    NO_DUP_DATA,
    CURRENT,
    APPEND,
    APPEND_DUP,
}
|
|
@ -0,0 +1,49 @@
|
|||
// Copyright 2018-2019 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
// LMDB backend: each submodule wraps one piece of the lmdb crate's API behind
// the backend traits defined in `backend::traits`.
mod arch_migrator;
mod arch_migrator_error;
mod cursor;
mod database;
mod environment;
mod error;
mod flags;
mod info;
mod iter;
mod stat;
mod transaction;

// Re-export the concrete `*Impl` types (and the migrator) so `backend/mod.rs`
// can alias them with `Lmdb`-prefixed public names.
pub use arch_migrator::{
    MigrateError as ArchMigrateError,
    MigrateResult as ArchMigrateResult,
    Migrator as ArchMigrator,
};
pub use cursor::{
    RoCursorImpl,
    RwCursorImpl,
};
pub use database::DatabaseImpl;
pub use environment::{
    EnvironmentBuilderImpl,
    EnvironmentImpl,
};
pub use error::ErrorImpl;
pub use flags::{
    DatabaseFlagsImpl,
    EnvironmentFlagsImpl,
    WriteFlagsImpl,
};
pub use info::InfoImpl;
pub use iter::IterImpl;
pub use stat::StatImpl;
pub use transaction::{
    RoTransactionImpl,
    RwTransactionImpl,
};
|
Разница между файлами не показана из-за своего большого размера
Загрузить разницу
107
third_party/rust/rkv-0.15.0/src/backend/impl_lmdb/arch_migrator_error.rs
поставляемый
Normal file
107
third_party/rust/rkv-0.15.0/src/backend/impl_lmdb/arch_migrator_error.rs
поставляемый
Normal file
|
@ -0,0 +1,107 @@
|
|||
// Copyright 2018-2019 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
use std::{
|
||||
io,
|
||||
num,
|
||||
str,
|
||||
};
|
||||
|
||||
use failure::Fail;
|
||||
|
||||
// Errors produced while migrating an LMDB database file. Variants either wrap
// an underlying error (I/O, lmdb, UTF-8, int conversion) or describe a
// structural problem found while parsing the source database file.
#[derive(Debug, Fail)]
pub enum MigrateError {
    #[fail(display = "database not found: {:?}", _0)]
    DatabaseNotFound(String),

    // Catch-all for errors reported as plain strings (see the From impls).
    #[fail(display = "{}", _0)]
    FromString(String),

    #[fail(display = "couldn't determine bit depth")]
    IndeterminateBitDepth,

    #[fail(display = "I/O error: {:?}", _0)]
    IoError(io::Error),

    #[fail(display = "invalid DatabaseFlags bits")]
    InvalidDatabaseBits,

    #[fail(display = "invalid data version")]
    InvalidDataVersion,

    #[fail(display = "invalid magic number")]
    InvalidMagicNum,

    #[fail(display = "invalid NodeFlags bits")]
    InvalidNodeBits,

    #[fail(display = "invalid PageFlags bits")]
    InvalidPageBits,

    #[fail(display = "invalid page number")]
    InvalidPageNum,

    #[fail(display = "lmdb backend error: {}", _0)]
    LmdbError(lmdb::Error),

    #[fail(display = "string conversion error")]
    StringConversionError,

    #[fail(display = "TryFromInt error: {:?}", _0)]
    TryFromIntError(num::TryFromIntError),

    #[fail(display = "unexpected Page variant")]
    UnexpectedPageVariant,

    #[fail(display = "unexpected PageHeader variant")]
    UnexpectedPageHeaderVariant,

    #[fail(display = "unsupported PageHeader variant")]
    UnsupportedPageHeaderVariant,

    #[fail(display = "UTF8 error: {:?}", _0)]
    Utf8Error(str::Utf8Error),
}
|
||||
|
||||
// Conversions into MigrateError so `?` can be used directly on the underlying
// error types inside the migrator; each impl simply wraps the source error in
// the corresponding variant.
impl From<io::Error> for MigrateError {
    fn from(e: io::Error) -> MigrateError {
        MigrateError::IoError(e)
    }
}

impl From<str::Utf8Error> for MigrateError {
    fn from(e: str::Utf8Error) -> MigrateError {
        MigrateError::Utf8Error(e)
    }
}

impl From<num::TryFromIntError> for MigrateError {
    fn from(e: num::TryFromIntError) -> MigrateError {
        MigrateError::TryFromIntError(e)
    }
}

impl From<&str> for MigrateError {
    fn from(e: &str) -> MigrateError {
        MigrateError::FromString(e.to_string())
    }
}

impl From<String> for MigrateError {
    fn from(e: String) -> MigrateError {
        MigrateError::FromString(e)
    }
}

impl From<lmdb::Error> for MigrateError {
    fn from(e: lmdb::Error) -> MigrateError {
        MigrateError::LmdbError(e)
    }
}
|
|
@ -0,0 +1,69 @@
|
|||
// Copyright 2018-2019 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
use lmdb::Cursor;
|
||||
|
||||
use super::IterImpl;
|
||||
use crate::backend::traits::BackendRoCursor;
|
||||
|
||||
// Read-only cursor over an LMDB database; a thin newtype over lmdb::RoCursor
// that adapts it to the backend's BackendRoCursor trait.
#[derive(Debug)]
pub struct RoCursorImpl<'c>(pub(crate) lmdb::RoCursor<'c>);

impl<'c> BackendRoCursor<'c> for RoCursorImpl<'c> {
    type Iter = IterImpl<'c, lmdb::RoCursor<'c>>;

    fn into_iter(self) -> Self::Iter {
        // We call RoCursor.iter() instead of RoCursor.iter_start() because
        // the latter panics when there are no items in the store, whereas the
        // former returns an iterator that yields no items. And since we create
        // the Cursor and don't change its position, we can be sure that a call
        // to Cursor.iter() will start at the beginning.
        IterImpl::new(self.0, lmdb::RoCursor::iter)
    }

    // Iterate starting at the first key equal to or greater than `key`.
    fn into_iter_from<K>(self, key: K) -> Self::Iter
    where
        K: AsRef<[u8]> + 'c,
    {
        IterImpl::new(self.0, |cursor| cursor.iter_from(key))
    }

    // Iterate over the duplicate values stored under exactly `key`.
    fn into_iter_dup_of<K>(self, key: K) -> Self::Iter
    where
        K: AsRef<[u8]> + 'c,
    {
        IterImpl::new(self.0, |cursor| cursor.iter_dup_of(key))
    }
}

// Same adaptation for a write cursor. It also implements only the read-only
// cursor trait, mirroring RoCursorImpl; iteration through a write cursor does
// not mutate the database.
#[derive(Debug)]
pub struct RwCursorImpl<'c>(pub(crate) lmdb::RwCursor<'c>);

impl<'c> BackendRoCursor<'c> for RwCursorImpl<'c> {
    type Iter = IterImpl<'c, lmdb::RwCursor<'c>>;

    fn into_iter(self) -> Self::Iter {
        // Same rationale as RoCursorImpl: iter() is safe on an empty store,
        // iter_start() is not.
        IterImpl::new(self.0, lmdb::RwCursor::iter)
    }

    fn into_iter_from<K>(self, key: K) -> Self::Iter
    where
        K: AsRef<[u8]> + 'c,
    {
        IterImpl::new(self.0, |cursor| cursor.iter_from(key))
    }

    fn into_iter_dup_of<K>(self, key: K) -> Self::Iter
    where
        K: AsRef<[u8]> + 'c,
    {
        IterImpl::new(self.0, |cursor| cursor.iter_dup_of(key))
    }
}
|
|
@ -0,0 +1,16 @@
|
|||
// Copyright 2018-2019 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
use crate::backend::traits::BackendDatabase;
|
||||
|
||||
// Newtype over lmdb::Database adapting it to the backend's BackendDatabase
// marker trait. Copy/Clone because the underlying handle is a cheap id.
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
pub struct DatabaseImpl(pub(crate) lmdb::Database);

impl BackendDatabase for DatabaseImpl {}
|
|
@ -0,0 +1,269 @@
|
|||
// Copyright 2018-2019 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
use std::{
|
||||
fs,
|
||||
path::{
|
||||
Path,
|
||||
PathBuf,
|
||||
},
|
||||
};
|
||||
|
||||
use lmdb::Error as LmdbError;
|
||||
|
||||
use super::{
|
||||
DatabaseFlagsImpl,
|
||||
DatabaseImpl,
|
||||
EnvironmentFlagsImpl,
|
||||
ErrorImpl,
|
||||
InfoImpl,
|
||||
RoTransactionImpl,
|
||||
RwTransactionImpl,
|
||||
StatImpl,
|
||||
};
|
||||
use crate::backend::traits::{
|
||||
BackendEnvironment,
|
||||
BackendEnvironmentBuilder,
|
||||
BackendInfo,
|
||||
BackendIter,
|
||||
BackendRoCursor,
|
||||
BackendRoCursorTransaction,
|
||||
BackendStat,
|
||||
};
|
||||
|
||||
// Builder for an LMDB-backed environment. Wraps lmdb::EnvironmentBuilder and
// additionally records path/lock/database characteristics inferred from the
// flags and settings, which `open()` uses to validate the path.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub struct EnvironmentBuilderImpl {
    builder: lmdb::EnvironmentBuilder,
    env_path_type: EnvironmentPathType,
    env_lock_type: EnvironmentLockType,
    env_db_type: EnvironmentDefaultDbType,
    // When true, open() creates the environment directory if it is missing.
    make_dir: bool,
}
|
||||
|
||||
impl<'b> BackendEnvironmentBuilder<'b> for EnvironmentBuilderImpl {
|
||||
type Environment = EnvironmentImpl;
|
||||
type Error = ErrorImpl;
|
||||
type Flags = EnvironmentFlagsImpl;
|
||||
|
||||
fn new() -> EnvironmentBuilderImpl {
|
||||
EnvironmentBuilderImpl {
|
||||
builder: lmdb::Environment::new(),
|
||||
env_path_type: EnvironmentPathType::SubDir,
|
||||
env_lock_type: EnvironmentLockType::Lockfile,
|
||||
env_db_type: EnvironmentDefaultDbType::SingleDatabase,
|
||||
make_dir: false,
|
||||
}
|
||||
}
|
||||
|
||||
fn set_flags<T>(&mut self, flags: T) -> &mut Self
|
||||
where
|
||||
T: Into<Self::Flags>,
|
||||
{
|
||||
let flags = flags.into();
|
||||
if flags.0 == lmdb::EnvironmentFlags::NO_SUB_DIR {
|
||||
self.env_path_type = EnvironmentPathType::NoSubDir;
|
||||
}
|
||||
if flags.0 == lmdb::EnvironmentFlags::NO_LOCK {
|
||||
self.env_lock_type = EnvironmentLockType::NoLockfile;
|
||||
}
|
||||
self.builder.set_flags(flags.0);
|
||||
self
|
||||
}
|
||||
|
||||
fn set_max_readers(&mut self, max_readers: u32) -> &mut Self {
|
||||
self.builder.set_max_readers(max_readers);
|
||||
self
|
||||
}
|
||||
|
||||
fn set_max_dbs(&mut self, max_dbs: u32) -> &mut Self {
|
||||
if max_dbs > 0 {
|
||||
self.env_db_type = EnvironmentDefaultDbType::MultipleNamedDatabases
|
||||
}
|
||||
self.builder.set_max_dbs(max_dbs);
|
||||
self
|
||||
}
|
||||
|
||||
fn set_map_size(&mut self, size: usize) -> &mut Self {
|
||||
self.builder.set_map_size(size);
|
||||
self
|
||||
}
|
||||
|
||||
fn set_make_dir_if_needed(&mut self, make_dir: bool) -> &mut Self {
|
||||
self.make_dir = make_dir;
|
||||
self
|
||||
}
|
||||
|
||||
fn open(&self, path: &Path) -> Result<Self::Environment, Self::Error> {
|
||||
match self.env_path_type {
|
||||
EnvironmentPathType::NoSubDir => {
|
||||
if !path.is_file() {
|
||||
return Err(ErrorImpl::UnsuitableEnvironmentPath(path.into()));
|
||||
}
|
||||
},
|
||||
EnvironmentPathType::SubDir => {
|
||||
if !path.is_dir() {
|
||||
if !self.make_dir {
|
||||
return Err(ErrorImpl::UnsuitableEnvironmentPath(path.into()));
|
||||
}
|
||||
fs::create_dir_all(path)?;
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
self.builder.open(path).map_err(ErrorImpl::LmdbError).and_then(|lmdbenv| {
|
||||
EnvironmentImpl::new(path, self.env_path_type, self.env_lock_type, self.env_db_type, lmdbenv)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Whether the environment path names a directory (LMDB's default layout)
/// or a single data file (the `NO_SUB_DIR` layout).
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum EnvironmentPathType {
    SubDir,
    NoSubDir,
}
|
||||
|
||||
/// Whether LMDB maintains an on-disk lockfile for the environment
/// (disabled by the `NO_LOCK` flag).
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum EnvironmentLockType {
    Lockfile,
    NoLockfile,
}
|
||||
|
||||
/// Whether the environment holds only the single unnamed database, or was
/// configured (via `set_max_dbs`) to allow multiple named databases.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum EnvironmentDefaultDbType {
    SingleDatabase,
    MultipleNamedDatabases,
}
|
||||
|
||||
/// An opened LMDB environment plus the layout information captured from the
/// builder, which is needed later (e.g. to enumerate on-disk files).
#[derive(Debug)]
pub struct EnvironmentImpl {
    path: PathBuf,
    env_path_type: EnvironmentPathType,
    env_lock_type: EnvironmentLockType,
    env_db_type: EnvironmentDefaultDbType,
    lmdbenv: lmdb::Environment,
}
|
||||
|
||||
impl EnvironmentImpl {
|
||||
pub(crate) fn new(
|
||||
path: &Path,
|
||||
env_path_type: EnvironmentPathType,
|
||||
env_lock_type: EnvironmentLockType,
|
||||
env_db_type: EnvironmentDefaultDbType,
|
||||
lmdbenv: lmdb::Environment,
|
||||
) -> Result<EnvironmentImpl, ErrorImpl> {
|
||||
Ok(EnvironmentImpl {
|
||||
path: path.to_path_buf(),
|
||||
env_path_type,
|
||||
env_lock_type,
|
||||
env_db_type,
|
||||
lmdbenv,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl<'e> BackendEnvironment<'e> for EnvironmentImpl {
    type Database = DatabaseImpl;
    type Error = ErrorImpl;
    type Flags = DatabaseFlagsImpl;
    type Info = InfoImpl;
    type RoTransaction = RoTransactionImpl<'e>;
    type RwTransaction = RwTransactionImpl<'e>;
    type Stat = StatImpl;

    /// List the databases in this environment.
    ///
    /// A single-database environment has only the unnamed (`None`) database.
    /// Otherwise the names are read as the keys of the unnamed root database,
    /// which is where LMDB records named databases.
    fn get_dbs(&self) -> Result<Vec<Option<String>>, Self::Error> {
        if self.env_db_type == EnvironmentDefaultDbType::SingleDatabase {
            return Ok(vec![None]);
        }
        let db = self.lmdbenv.open_db(None).map(DatabaseImpl).map_err(ErrorImpl::LmdbError)?;
        let reader = self.begin_ro_txn()?;
        let cursor = reader.open_ro_cursor(&db)?;
        // `BackendIter` is not a std `Iterator`, so drain it manually.
        let mut iter = cursor.into_iter();
        let mut store = vec![];
        while let Some(result) = iter.next() {
            let (key, _) = result?;
            // Database names must be valid UTF-8; anything else is treated as
            // corruption of the root database.
            let name = String::from_utf8(key.to_owned()).map_err(|_| ErrorImpl::LmdbError(lmdb::Error::Corrupted))?;
            store.push(Some(name));
        }
        Ok(store)
    }

    /// Open an existing named (or the unnamed, with `None`) database.
    fn open_db(&self, name: Option<&str>) -> Result<Self::Database, Self::Error> {
        self.lmdbenv.open_db(name).map(DatabaseImpl).map_err(ErrorImpl::LmdbError)
    }

    /// Open a database, creating it with `flags` if it does not exist.
    fn create_db(&self, name: Option<&str>, flags: Self::Flags) -> Result<Self::Database, Self::Error> {
        self.lmdbenv.create_db(name, flags.0).map(DatabaseImpl).map_err(ErrorImpl::LmdbError)
    }

    /// Begin a read-only transaction tied to this environment's lifetime.
    fn begin_ro_txn(&'e self) -> Result<Self::RoTransaction, Self::Error> {
        self.lmdbenv.begin_ro_txn().map(RoTransactionImpl).map_err(ErrorImpl::LmdbError)
    }

    /// Begin a read-write transaction tied to this environment's lifetime.
    fn begin_rw_txn(&'e self) -> Result<Self::RwTransaction, Self::Error> {
        self.lmdbenv.begin_rw_txn().map(RwTransactionImpl).map_err(ErrorImpl::LmdbError)
    }

    /// Flush buffered writes to disk; `force` requests a synchronous flush.
    fn sync(&self, force: bool) -> Result<(), Self::Error> {
        self.lmdbenv.sync(force).map_err(ErrorImpl::LmdbError)
    }

    /// Environment statistics (page size, tree depth, page/entry counts).
    fn stat(&self) -> Result<Self::Stat, Self::Error> {
        self.lmdbenv.stat().map(StatImpl).map_err(ErrorImpl::LmdbError)
    }

    /// Environment runtime information (map size, last page/txn ids, readers).
    fn info(&self) -> Result<Self::Info, Self::Error> {
        self.lmdbenv.info().map(InfoImpl).map_err(ErrorImpl::LmdbError)
    }

    /// Number of pages currently on the environment's freelist.
    fn freelist(&self) -> Result<usize, Self::Error> {
        self.lmdbenv.freelist().map_err(ErrorImpl::LmdbError)
    }

    /// Estimate how full the memory map is, as a fraction in `0.0..=1.0`.
    fn load_ratio(&self) -> Result<Option<f32>, Self::Error> {
        let stat = self.stat()?;
        let info = self.info()?;
        let freelist = self.freelist()?;

        let last_pgno = info.last_pgno() + 1; // pgno is 0 based.
        let total_pgs = info.map_size() / stat.page_size();
        // More free pages than allocated pages is impossible in a healthy
        // environment, so treat it as corruption.
        if freelist > last_pgno {
            return Err(ErrorImpl::LmdbError(LmdbError::Corrupted));
        }
        let used_pgs = last_pgno - freelist;
        Ok(Some(used_pgs as f32 / total_pgs as f32))
    }

    /// Resize the memory map to `size` bytes.
    fn set_map_size(&self, size: usize) -> Result<(), Self::Error> {
        self.lmdbenv.set_map_size(size).map_err(ErrorImpl::LmdbError)
    }

    /// Paths of the files LMDB keeps on disk for this environment
    /// (`data.mdb`, plus `lock.mdb` when a lockfile is in use).
    fn get_files_on_disk(&self) -> Vec<PathBuf> {
        let mut store = vec![];

        if self.env_path_type == EnvironmentPathType::NoSubDir {
            // The option NO_SUB_DIR could change the default directory layout; therefore this should
            // probably return the path used to create environment, along with the custom lockfile
            // when available.
            unimplemented!();
        }

        let mut db_filename = self.path.clone();
        db_filename.push("data.mdb");
        store.push(db_filename);

        if self.env_lock_type == EnvironmentLockType::Lockfile {
            let mut lock_filename = self.path.clone();
            lock_filename.push("lock.mdb");
            store.push(lock_filename);
        }

        store
    }
}
|
|
@ -0,0 +1,62 @@
|
|||
// Copyright 2018-2019 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
use std::{
|
||||
fmt,
|
||||
io,
|
||||
path::PathBuf,
|
||||
};
|
||||
|
||||
use crate::{
|
||||
backend::traits::BackendError,
|
||||
error::StoreError,
|
||||
};
|
||||
|
||||
/// Errors surfaced by the LMDB backend.
#[derive(Debug)]
pub enum ErrorImpl {
    /// An error reported by the underlying LMDB library.
    LmdbError(lmdb::Error),
    /// The environment path was missing or of the wrong kind
    /// (file vs. directory) for the configured layout.
    UnsuitableEnvironmentPath(PathBuf),
    /// A filesystem-level failure, e.g. while creating the environment
    /// directory.
    IoError(io::Error),
}

// Marker impl: lets this type be used wherever a backend error is expected.
impl BackendError for ErrorImpl {}
|
||||
|
||||
impl fmt::Display for ErrorImpl {
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
match self {
|
||||
ErrorImpl::LmdbError(e) => e.fmt(fmt),
|
||||
ErrorImpl::UnsuitableEnvironmentPath(_) => write!(fmt, "UnsuitableEnvironmentPath"),
|
||||
ErrorImpl::IoError(e) => e.fmt(fmt),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<StoreError> for ErrorImpl {
|
||||
fn into(self) -> StoreError {
|
||||
match self {
|
||||
ErrorImpl::LmdbError(lmdb::Error::Corrupted) => StoreError::DatabaseCorrupted,
|
||||
ErrorImpl::LmdbError(lmdb::Error::NotFound) => StoreError::KeyValuePairNotFound,
|
||||
ErrorImpl::LmdbError(lmdb::Error::BadValSize) => StoreError::KeyValuePairBadSize,
|
||||
ErrorImpl::LmdbError(lmdb::Error::Invalid) => StoreError::FileInvalid,
|
||||
ErrorImpl::LmdbError(lmdb::Error::MapFull) => StoreError::MapFull,
|
||||
ErrorImpl::LmdbError(lmdb::Error::DbsFull) => StoreError::DbsFull,
|
||||
ErrorImpl::LmdbError(lmdb::Error::ReadersFull) => StoreError::ReadersFull,
|
||||
ErrorImpl::LmdbError(error) => StoreError::LmdbError(error),
|
||||
ErrorImpl::UnsuitableEnvironmentPath(path) => StoreError::UnsuitableEnvironmentPath(path),
|
||||
ErrorImpl::IoError(error) => StoreError::IoError(error),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<io::Error> for ErrorImpl {
|
||||
fn from(e: io::Error) -> ErrorImpl {
|
||||
ErrorImpl::IoError(e)
|
||||
}
|
||||
}
|
|
@ -0,0 +1,132 @@
|
|||
// Copyright 2018-2019 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
use crate::backend::{
|
||||
common::{
|
||||
DatabaseFlags,
|
||||
EnvironmentFlags,
|
||||
WriteFlags,
|
||||
},
|
||||
traits::{
|
||||
BackendDatabaseFlags,
|
||||
BackendEnvironmentFlags,
|
||||
BackendFlags,
|
||||
BackendWriteFlags,
|
||||
},
|
||||
};
|
||||
|
||||
/// Newtype over `lmdb::EnvironmentFlags`, adapting it to the backend traits.
#[derive(Debug, Eq, PartialEq, Copy, Clone, Default)]
pub struct EnvironmentFlagsImpl(pub(crate) lmdb::EnvironmentFlags);

impl BackendFlags for EnvironmentFlagsImpl {
    /// An empty flag set: no environment options enabled.
    fn empty() -> EnvironmentFlagsImpl {
        EnvironmentFlagsImpl(lmdb::EnvironmentFlags::empty())
    }
}

impl BackendEnvironmentFlags for EnvironmentFlagsImpl {
    /// Set (`value == true`) or clear (`value == false`) a single flag.
    fn set(&mut self, flag: EnvironmentFlags, value: bool) {
        self.0.set(flag.into(), value)
    }
}
|
||||
|
||||
impl Into<EnvironmentFlagsImpl> for EnvironmentFlags {
|
||||
fn into(self) -> EnvironmentFlagsImpl {
|
||||
EnvironmentFlagsImpl(self.into())
|
||||
}
|
||||
}
|
||||
|
||||
// One-to-one mapping of the backend-agnostic environment flags onto their
// LMDB equivalents. Exhaustive on purpose: adding a flag to the common enum
// must force an update here.
// NOTE(review): clippy prefers `From` over `Into`; kept as `Into` to match
// the conversion style this module already uses for lmdb-owned target types.
impl Into<lmdb::EnvironmentFlags> for EnvironmentFlags {
    fn into(self) -> lmdb::EnvironmentFlags {
        match self {
            EnvironmentFlags::FIXED_MAP => lmdb::EnvironmentFlags::FIXED_MAP,
            EnvironmentFlags::NO_SUB_DIR => lmdb::EnvironmentFlags::NO_SUB_DIR,
            EnvironmentFlags::WRITE_MAP => lmdb::EnvironmentFlags::WRITE_MAP,
            EnvironmentFlags::READ_ONLY => lmdb::EnvironmentFlags::READ_ONLY,
            EnvironmentFlags::NO_META_SYNC => lmdb::EnvironmentFlags::NO_META_SYNC,
            EnvironmentFlags::NO_SYNC => lmdb::EnvironmentFlags::NO_SYNC,
            EnvironmentFlags::MAP_ASYNC => lmdb::EnvironmentFlags::MAP_ASYNC,
            EnvironmentFlags::NO_TLS => lmdb::EnvironmentFlags::NO_TLS,
            EnvironmentFlags::NO_LOCK => lmdb::EnvironmentFlags::NO_LOCK,
            EnvironmentFlags::NO_READAHEAD => lmdb::EnvironmentFlags::NO_READAHEAD,
            EnvironmentFlags::NO_MEM_INIT => lmdb::EnvironmentFlags::NO_MEM_INIT,
        }
    }
}
|
||||
|
||||
/// Newtype over `lmdb::DatabaseFlags`, adapting it to the backend traits.
#[derive(Debug, Eq, PartialEq, Copy, Clone, Default)]
pub struct DatabaseFlagsImpl(pub(crate) lmdb::DatabaseFlags);

impl BackendFlags for DatabaseFlagsImpl {
    /// An empty flag set: no database options enabled.
    fn empty() -> DatabaseFlagsImpl {
        DatabaseFlagsImpl(lmdb::DatabaseFlags::empty())
    }
}

impl BackendDatabaseFlags for DatabaseFlagsImpl {
    /// Set (`value == true`) or clear (`value == false`) a single flag.
    fn set(&mut self, flag: DatabaseFlags, value: bool) {
        self.0.set(flag.into(), value)
    }
}
|
||||
|
||||
impl Into<DatabaseFlagsImpl> for DatabaseFlags {
|
||||
fn into(self) -> DatabaseFlagsImpl {
|
||||
DatabaseFlagsImpl(self.into())
|
||||
}
|
||||
}
|
||||
|
||||
// One-to-one mapping of the backend-agnostic database flags onto their LMDB
// equivalents. The dup-sort and int-key variants only exist when the
// corresponding cargo features are enabled, hence the `cfg` guards.
// NOTE(review): clippy prefers `From` over `Into`; kept as `Into` to match
// the conversion style this module already uses for lmdb-owned target types.
impl Into<lmdb::DatabaseFlags> for DatabaseFlags {
    fn into(self) -> lmdb::DatabaseFlags {
        match self {
            DatabaseFlags::REVERSE_KEY => lmdb::DatabaseFlags::REVERSE_KEY,
            #[cfg(feature = "db-dup-sort")]
            DatabaseFlags::DUP_SORT => lmdb::DatabaseFlags::DUP_SORT,
            #[cfg(feature = "db-dup-sort")]
            DatabaseFlags::DUP_FIXED => lmdb::DatabaseFlags::DUP_FIXED,
            #[cfg(feature = "db-int-key")]
            DatabaseFlags::INTEGER_KEY => lmdb::DatabaseFlags::INTEGER_KEY,
            DatabaseFlags::INTEGER_DUP => lmdb::DatabaseFlags::INTEGER_DUP,
            DatabaseFlags::REVERSE_DUP => lmdb::DatabaseFlags::REVERSE_DUP,
        }
    }
}
|
||||
|
||||
/// Newtype over `lmdb::WriteFlags`, adapting it to the backend traits.
#[derive(Debug, Eq, PartialEq, Copy, Clone, Default)]
pub struct WriteFlagsImpl(pub(crate) lmdb::WriteFlags);

impl BackendFlags for WriteFlagsImpl {
    /// An empty flag set: default write behavior.
    fn empty() -> WriteFlagsImpl {
        WriteFlagsImpl(lmdb::WriteFlags::empty())
    }
}

impl BackendWriteFlags for WriteFlagsImpl {
    /// Set (`value == true`) or clear (`value == false`) a single flag.
    fn set(&mut self, flag: WriteFlags, value: bool) {
        self.0.set(flag.into(), value)
    }
}
|
||||
|
||||
impl Into<WriteFlagsImpl> for WriteFlags {
|
||||
fn into(self) -> WriteFlagsImpl {
|
||||
WriteFlagsImpl(self.into())
|
||||
}
|
||||
}
|
||||
|
||||
// One-to-one mapping of the backend-agnostic write flags onto their LMDB
// equivalents.
// NOTE(review): clippy prefers `From` over `Into`; kept as `Into` to match
// the conversion style this module already uses for lmdb-owned target types.
impl Into<lmdb::WriteFlags> for WriteFlags {
    fn into(self) -> lmdb::WriteFlags {
        match self {
            WriteFlags::NO_OVERWRITE => lmdb::WriteFlags::NO_OVERWRITE,
            WriteFlags::NO_DUP_DATA => lmdb::WriteFlags::NO_DUP_DATA,
            WriteFlags::CURRENT => lmdb::WriteFlags::CURRENT,
            WriteFlags::APPEND => lmdb::WriteFlags::APPEND,
            WriteFlags::APPEND_DUP => lmdb::WriteFlags::APPEND_DUP,
        }
    }
}
|
|
@ -0,0 +1,35 @@
|
|||
// Copyright 2018-2019 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
use crate::backend::traits::BackendInfo;
|
||||
|
||||
/// Wrapper around `lmdb::Info`, adapting it to the `BackendInfo` trait.
pub struct InfoImpl(pub(crate) lmdb::Info);

impl BackendInfo for InfoImpl {
    /// Size of the memory map, in bytes.
    fn map_size(&self) -> usize {
        self.0.map_size()
    }

    /// Number of the last used page (0-based).
    fn last_pgno(&self) -> usize {
        self.0.last_pgno()
    }

    /// Id of the last committed transaction.
    fn last_txnid(&self) -> usize {
        self.0.last_txnid()
    }

    /// Maximum number of reader slots in the environment.
    fn max_readers(&self) -> usize {
        self.0.max_readers() as usize
    }

    /// Number of reader slots currently in use.
    fn num_readers(&self) -> usize {
        self.0.num_readers() as usize
    }
}
|
|
@ -0,0 +1,41 @@
|
|||
// Copyright 2018-2019 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
use super::ErrorImpl;
|
||||
use crate::backend::traits::BackendIter;
|
||||
|
||||
/// An LMDB iterator bundled with the cursor it borrows from.
pub struct IterImpl<'i, C> {
    // LMDB semantics dictate that a cursor must be valid for the entire lifetime
    // of an iterator. In other words, cursors must not be dropped while an
    // iterator built from it is alive. Unfortunately, the LMDB crate API does
    // not express this through the type system, so we must enforce it somehow.
    #[allow(dead_code)]
    cursor: C,
    // The underlying LMDB iterator, borrowing from `cursor` above.
    iter: lmdb::Iter<'i>,
}

impl<'i, C> IterImpl<'i, C> {
    /// Build the iterator via `to_iter` and keep `cursor` alive inside the
    /// returned value so the borrow held by `iter` stays valid.
    pub(crate) fn new(mut cursor: C, to_iter: impl FnOnce(&mut C) -> lmdb::Iter<'i>) -> IterImpl<'i, C> {
        let iter = to_iter(&mut cursor);
        IterImpl {
            cursor,
            iter,
        }
    }
}

impl<'i, C> BackendIter<'i> for IterImpl<'i, C> {
    type Error = ErrorImpl;

    /// Yield the next `(key, value)` pair, converting LMDB errors into the
    /// backend error type.
    #[allow(clippy::type_complexity)]
    fn next(&mut self) -> Option<Result<(&'i [u8], &'i [u8]), Self::Error>> {
        self.iter.next().map(|e| e.map_err(ErrorImpl::LmdbError))
    }
}
|
|
@ -0,0 +1,39 @@
|
|||
// Copyright 2018-2019 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
use crate::backend::traits::BackendStat;
|
||||
|
||||
/// Wrapper around `lmdb::Stat`, adapting it to the `BackendStat` trait.
pub struct StatImpl(pub(crate) lmdb::Stat);

impl BackendStat for StatImpl {
    /// Size of a database page, in bytes.
    fn page_size(&self) -> usize {
        self.0.page_size() as usize
    }

    /// Depth of the B-tree.
    fn depth(&self) -> usize {
        self.0.depth() as usize
    }

    /// Number of internal (branch) pages.
    fn branch_pages(&self) -> usize {
        self.0.branch_pages()
    }

    /// Number of leaf pages.
    fn leaf_pages(&self) -> usize {
        self.0.leaf_pages()
    }

    /// Number of overflow pages (for values too large for a leaf page).
    fn overflow_pages(&self) -> usize {
        self.0.overflow_pages()
    }

    /// Number of key/value entries.
    fn entries(&self) -> usize {
        self.0.entries()
    }
}
|
|
@ -0,0 +1,95 @@
|
|||
// Copyright 2018-2019 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
use lmdb::Transaction;
|
||||
|
||||
use super::{
|
||||
DatabaseImpl,
|
||||
ErrorImpl,
|
||||
RoCursorImpl,
|
||||
WriteFlagsImpl,
|
||||
};
|
||||
use crate::backend::traits::{
|
||||
BackendRoCursorTransaction,
|
||||
BackendRoTransaction,
|
||||
BackendRwCursorTransaction,
|
||||
BackendRwTransaction,
|
||||
};
|
||||
|
||||
/// Read-only transaction over an LMDB environment.
#[derive(Debug)]
pub struct RoTransactionImpl<'t>(pub(crate) lmdb::RoTransaction<'t>);

impl<'t> BackendRoTransaction for RoTransactionImpl<'t> {
    type Database = DatabaseImpl;
    type Error = ErrorImpl;

    /// Look up `key` in `db`; a missing key surfaces as an `LmdbError`.
    fn get(&self, db: &Self::Database, key: &[u8]) -> Result<&[u8], Self::Error> {
        // NOTE: the extra borrow (`&key`, i.e. `&&[u8]`) matches lmdb's
        // generic `get` signature; do not "simplify" it away without checking
        // `lmdb::Transaction::get`.
        self.0.get(db.0, &key).map_err(ErrorImpl::LmdbError)
    }

    /// End the transaction without committing.
    fn abort(self) {
        self.0.abort()
    }
}

impl<'t> BackendRoCursorTransaction<'t> for RoTransactionImpl<'t> {
    type RoCursor = RoCursorImpl<'t>;

    /// Open a read-only cursor over `db`, tied to this transaction's lifetime.
    fn open_ro_cursor(&'t self, db: &Self::Database) -> Result<Self::RoCursor, Self::Error> {
        self.0.open_ro_cursor(db.0).map(RoCursorImpl).map_err(ErrorImpl::LmdbError)
    }
}
|
||||
|
||||
/// Read-write transaction over an LMDB environment.
#[derive(Debug)]
pub struct RwTransactionImpl<'t>(pub(crate) lmdb::RwTransaction<'t>);

impl<'t> BackendRwTransaction for RwTransactionImpl<'t> {
    type Database = DatabaseImpl;
    type Error = ErrorImpl;
    type Flags = WriteFlagsImpl;

    /// Look up `key` in `db`; a missing key surfaces as an `LmdbError`.
    fn get(&self, db: &Self::Database, key: &[u8]) -> Result<&[u8], Self::Error> {
        // NOTE: the extra borrows here and below match lmdb's generic
        // signatures; do not remove them without checking `lmdb::Transaction`.
        self.0.get(db.0, &key).map_err(ErrorImpl::LmdbError)
    }

    /// Store `value` under `key`, honouring the given write `flags`.
    fn put(&mut self, db: &Self::Database, key: &[u8], value: &[u8], flags: Self::Flags) -> Result<(), Self::Error> {
        self.0.put(db.0, &key, &value, flags.0).map_err(ErrorImpl::LmdbError)
    }

    /// Delete `key` from `db` (two-argument form used when duplicate values
    /// are not supported).
    #[cfg(not(feature = "db-dup-sort"))]
    fn del(&mut self, db: &Self::Database, key: &[u8]) -> Result<(), Self::Error> {
        self.0.del(db.0, &key, None).map_err(ErrorImpl::LmdbError)
    }

    /// Delete `key` from `db`; with `Some(value)` only that duplicate value
    /// is removed, with `None` all values for the key are removed.
    #[cfg(feature = "db-dup-sort")]
    fn del(&mut self, db: &Self::Database, key: &[u8], value: Option<&[u8]>) -> Result<(), Self::Error> {
        self.0.del(db.0, &key, value).map_err(ErrorImpl::LmdbError)
    }

    /// Remove every entry from `db`.
    fn clear_db(&mut self, db: &Self::Database) -> Result<(), Self::Error> {
        self.0.clear_db(db.0).map_err(ErrorImpl::LmdbError)
    }

    /// Commit the transaction, making its writes visible and durable.
    fn commit(self) -> Result<(), Self::Error> {
        self.0.commit().map_err(ErrorImpl::LmdbError)
    }

    /// End the transaction, discarding its writes.
    fn abort(self) {
        self.0.abort()
    }
}

impl<'t> BackendRwCursorTransaction<'t> for RwTransactionImpl<'t> {
    type RoCursor = RoCursorImpl<'t>;

    /// Open a read-only cursor over `db` within this write transaction.
    fn open_ro_cursor(&'t self, db: &Self::Database) -> Result<Self::RoCursor, Self::Error> {
        self.0.open_ro_cursor(db.0).map(RoCursorImpl).map_err(ErrorImpl::LmdbError)
    }
}
|
|
@ -0,0 +1,43 @@
|
|||
// Copyright 2018-2019 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
mod cursor;
|
||||
mod database;
|
||||
mod environment;
|
||||
mod error;
|
||||
mod flags;
|
||||
mod info;
|
||||
mod iter;
|
||||
mod snapshot;
|
||||
mod stat;
|
||||
mod transaction;
|
||||
|
||||
pub use cursor::{
|
||||
RoCursorImpl,
|
||||
RwCursorImpl,
|
||||
};
|
||||
pub use database::DatabaseImpl;
|
||||
pub use environment::{
|
||||
EnvironmentBuilderImpl,
|
||||
EnvironmentImpl,
|
||||
};
|
||||
pub use error::ErrorImpl;
|
||||
pub use flags::{
|
||||
DatabaseFlagsImpl,
|
||||
EnvironmentFlagsImpl,
|
||||
WriteFlagsImpl,
|
||||
};
|
||||
pub use info::InfoImpl;
|
||||
pub use iter::IterImpl;
|
||||
pub use stat::StatImpl;
|
||||
pub use transaction::{
|
||||
RoTransactionImpl,
|
||||
RwTransactionImpl,
|
||||
};
|
|
@ -0,0 +1,94 @@
|
|||
// Copyright 2018-2019 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
use super::{
|
||||
snapshot::Snapshot,
|
||||
IterImpl,
|
||||
};
|
||||
use crate::backend::traits::BackendRoCursor;
|
||||
|
||||
/// Read-only cursor over a database `Snapshot` (safe-mode backend).
#[derive(Debug)]
pub struct RoCursorImpl<'c>(pub(crate) &'c Snapshot);

// Variant without duplicate-value support: the snapshot yields one value per
// key, so pairs can be emitted directly.
#[cfg(not(feature = "db-dup-sort"))]
impl<'c> BackendRoCursor<'c> for RoCursorImpl<'c> {
    type Iter = IterImpl<'c>;

    /// Iterate over every key/value pair in the snapshot.
    fn into_iter(self) -> Self::Iter {
        IterImpl(Box::new(self.0.iter()))
    }

    /// Iterate starting at the first key >= `key`.
    /// NOTE(review): `skip_while` assumes `Snapshot::iter` yields keys in
    /// sorted order — confirm that guarantee in the snapshot implementation.
    fn into_iter_from<K>(self, key: K) -> Self::Iter
    where
        K: AsRef<[u8]> + 'c,
    {
        IterImpl(Box::new(self.0.iter().skip_while(move |&(k, _)| k < key.as_ref())))
    }

    /// Iterate over the pairs whose key equals `key` exactly.
    fn into_iter_dup_of<K>(self, key: K) -> Self::Iter
    where
        K: AsRef<[u8]> + 'c,
    {
        IterImpl(Box::new(self.0.iter().filter(move |&(k, _)| k == key.as_ref())))
    }
}
|
||||
|
||||
// Variant with duplicate-value support: the snapshot yields an iterator of
// values per key, which is flattened into `(key, value)` pairs.
#[cfg(feature = "db-dup-sort")]
impl<'c> BackendRoCursor<'c> for RoCursorImpl<'c> {
    type Iter = IterImpl<'c>;

    /// Iterate over every key/value pair, expanding duplicate values.
    fn into_iter(self) -> Self::Iter {
        let flattened = self.0.iter().flat_map(|(key, values)| values.map(move |value| (key, value)));
        IterImpl(Box::new(flattened))
    }

    /// Iterate starting at the first key >= `key`, expanding duplicates.
    /// NOTE(review): `skip_while` assumes `Snapshot::iter` yields keys in
    /// sorted order — confirm that guarantee in the snapshot implementation.
    fn into_iter_from<K>(self, key: K) -> Self::Iter
    where
        K: AsRef<[u8]> + 'c,
    {
        let skipped = self.0.iter().skip_while(move |&(k, _)| k < key.as_ref());
        let flattened = skipped.flat_map(|(key, values)| values.map(move |value| (key, value)));
        IterImpl(Box::new(flattened))
    }

    /// Iterate over all duplicate values stored under exactly `key`.
    fn into_iter_dup_of<K>(self, key: K) -> Self::Iter
    where
        K: AsRef<[u8]> + 'c,
    {
        let filtered = self.0.iter().filter(move |&(k, _)| k == key.as_ref());
        let flattened = filtered.flat_map(|(key, values)| values.map(move |value| (key, value)));
        IterImpl(Box::new(flattened))
    }
}
|
||||
|
||||
/// Writable cursor over a mutable `Snapshot`.
///
/// Iteration is not implemented for write cursors in this backend; all three
/// methods panic via `unimplemented!` if called.
#[derive(Debug)]
pub struct RwCursorImpl<'c>(&'c mut Snapshot);

impl<'c> BackendRoCursor<'c> for RwCursorImpl<'c> {
    type Iter = IterImpl<'c>;

    /// Not supported for write cursors; panics if called.
    fn into_iter(self) -> Self::Iter {
        unimplemented!()
    }

    /// Not supported for write cursors; panics if called.
    fn into_iter_from<K>(self, _key: K) -> Self::Iter
    where
        K: AsRef<[u8]> + 'c,
    {
        unimplemented!()
    }

    /// Not supported for write cursors; panics if called.
    fn into_iter_dup_of<K>(self, _key: K) -> Self::Iter
    where
        K: AsRef<[u8]> + 'c,
    {
        unimplemented!()
    }
}
|
|
@ -0,0 +1,47 @@
|
|||
// Copyright 2018-2019 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
use id_arena::Id;
|
||||
use serde_derive::{
|
||||
Deserialize,
|
||||
Serialize,
|
||||
};
|
||||
|
||||
use super::{
|
||||
snapshot::Snapshot,
|
||||
DatabaseFlagsImpl,
|
||||
};
|
||||
use crate::backend::traits::BackendDatabase;
|
||||
|
||||
/// Handle to a database: an id into the environment's database arena.
#[derive(Debug, Eq, PartialEq, Copy, Clone, Hash)]
pub struct DatabaseImpl(pub(crate) Id<Database>);

// Marker impl: lets this type be used wherever a backend database handle is
// expected.
impl BackendDatabase for DatabaseImpl {}
|
||||
|
||||
/// A single logical key/value database in the safe-mode backend, backed by a
/// serde-serializable `Snapshot`.
#[derive(Debug, Serialize, Deserialize)]
pub struct Database {
    // The database's current contents; swapped out via `replace`.
    snapshot: Snapshot,
}

impl Database {
    /// Create a database from an existing snapshot, or from a fresh empty
    /// snapshot built with `flags` when no snapshot is supplied.
    pub(crate) fn new(flags: Option<DatabaseFlagsImpl>, snapshot: Option<Snapshot>) -> Database {
        Database {
            snapshot: snapshot.unwrap_or_else(|| Snapshot::new(flags)),
        }
    }

    /// A clone of the current snapshot, for readers.
    pub(crate) fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }

    /// Swap in `snapshot` as the new contents and return the previous one.
    pub(crate) fn replace(&mut self, snapshot: Snapshot) -> Snapshot {
        std::mem::replace(&mut self.snapshot, snapshot)
    }
}
|
|
@ -0,0 +1,289 @@
|
|||
// Copyright 2018-2019 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
use std::{
|
||||
borrow::Cow,
|
||||
collections::HashMap,
|
||||
fs,
|
||||
path::{
|
||||
Path,
|
||||
PathBuf,
|
||||
},
|
||||
sync::{
|
||||
Arc,
|
||||
RwLock,
|
||||
RwLockReadGuard,
|
||||
RwLockWriteGuard,
|
||||
},
|
||||
};
|
||||
|
||||
use id_arena::Arena;
|
||||
use log::warn;
|
||||
|
||||
use super::{
|
||||
database::Database,
|
||||
DatabaseFlagsImpl,
|
||||
DatabaseImpl,
|
||||
EnvironmentFlagsImpl,
|
||||
ErrorImpl,
|
||||
InfoImpl,
|
||||
RoTransactionImpl,
|
||||
RwTransactionImpl,
|
||||
StatImpl,
|
||||
};
|
||||
use crate::backend::traits::{
|
||||
BackendEnvironment,
|
||||
BackendEnvironmentBuilder,
|
||||
};
|
||||
|
||||
// File name of the persisted store when the environment path is a directory.
const DEFAULT_DB_FILENAME: &str = "data.safe.bin";

// Arena owning every `Database`; ids into it serve as database handles.
type DatabaseArena = Arena<Database>;
// Maps a database name (`None` = the default unnamed database) to its handle.
type DatabaseNameMap = HashMap<Option<String>, DatabaseImpl>;
|
||||
|
||||
/// Builder for a safe-mode `EnvironmentImpl`.
///
/// Settings are merely recorded here; several of them are not implemented by
/// this backend and are ignored (with a warning) when the environment is
/// constructed.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub struct EnvironmentBuilderImpl {
    flags: EnvironmentFlagsImpl,
    max_readers: Option<usize>,
    max_dbs: Option<usize>,
    map_size: Option<usize>,
    // When true, `open` creates the environment directory if it is missing.
    make_dir: bool,
}
|
||||
|
||||
impl<'b> BackendEnvironmentBuilder<'b> for EnvironmentBuilderImpl {
    type Environment = EnvironmentImpl;
    type Error = ErrorImpl;
    type Flags = EnvironmentFlagsImpl;

    /// Start from a builder with no flags and no limits configured.
    fn new() -> EnvironmentBuilderImpl {
        EnvironmentBuilderImpl {
            flags: EnvironmentFlagsImpl::empty(),
            max_readers: None,
            max_dbs: None,
            map_size: None,
            make_dir: false,
        }
    }

    /// Record the environment flags (applied — or warned about — at open).
    fn set_flags<T>(&mut self, flags: T) -> &mut Self
    where
        T: Into<Self::Flags>,
    {
        self.flags = flags.into();
        self
    }

    /// Record the maximum number of readers (not implemented; ignored later).
    fn set_max_readers(&mut self, max_readers: u32) -> &mut Self {
        self.max_readers = Some(max_readers as usize);
        self
    }

    /// Record the maximum number of named databases.
    fn set_max_dbs(&mut self, max_dbs: u32) -> &mut Self {
        self.max_dbs = Some(max_dbs as usize);
        self
    }

    /// Record the map size (not implemented; ignored later).
    fn set_map_size(&mut self, map_size: usize) -> &mut Self {
        self.map_size = Some(map_size);
        self
    }

    /// Allow `open` to create the environment directory when missing.
    fn set_make_dir_if_needed(&mut self, make_dir: bool) -> &mut Self {
        self.make_dir = make_dir;
        self
    }

    /// Validate/create the environment directory, build the environment, and
    /// load any previously persisted databases from disk.
    fn open(&self, path: &Path) -> Result<Self::Environment, Self::Error> {
        // Technically NO_SUB_DIR should change these checks here, but they're both currently
        // unimplemented with this storage backend.
        if !path.is_dir() {
            if !self.make_dir {
                return Err(ErrorImpl::UnsuitableEnvironmentPath(path.into()));
            }
            fs::create_dir_all(path)?;
        }
        let mut env = EnvironmentImpl::new(path, self.flags, self.max_readers, self.max_dbs, self.map_size)?;
        env.read_from_disk()?;
        Ok(env)
    }
}
|
||||
|
||||
/// The safe-mode environment: databases held in an in-process arena and
/// persisted to a single serialized file under `path`.
#[derive(Debug)]
pub struct EnvironmentImpl {
    path: PathBuf,
    // Upper bound on the number of databases; `usize::MAX` when unspecified.
    max_dbs: usize,
    // Owns every database; locked for concurrent reader/writer access.
    arena: RwLock<DatabaseArena>,
    // Name -> handle lookup table for the databases in `arena`.
    dbs: RwLock<DatabaseNameMap>,
    // NOTE(review): these appear to track outstanding read/write transactions
    // via `Arc` strong counts — confirm against the transaction code.
    ro_txns: Arc<()>,
    rw_txns: Arc<()>,
}
|
||||
|
||||
impl EnvironmentImpl {
|
||||
fn serialize(&self) -> Result<Vec<u8>, ErrorImpl> {
|
||||
let arena = self.arena.read().map_err(|_| ErrorImpl::EnvPoisonError)?;
|
||||
let dbs = self.dbs.read().map_err(|_| ErrorImpl::EnvPoisonError)?;
|
||||
let data: HashMap<_, _> = dbs.iter().map(|(name, id)| (name, &arena[id.0])).collect();
|
||||
Ok(bincode::serialize(&data)?)
|
||||
}
|
||||
|
||||
fn deserialize(bytes: &[u8]) -> Result<(DatabaseArena, DatabaseNameMap), ErrorImpl> {
|
||||
let mut arena = DatabaseArena::new();
|
||||
let mut dbs = HashMap::new();
|
||||
let data: HashMap<_, _> = bincode::deserialize(&bytes)?;
|
||||
for (name, db) in data {
|
||||
dbs.insert(name, DatabaseImpl(arena.alloc(db)));
|
||||
}
|
||||
Ok((arena, dbs))
|
||||
}
|
||||
}
|
||||
|
||||
impl EnvironmentImpl {
|
||||
pub(crate) fn new(
|
||||
path: &Path,
|
||||
flags: EnvironmentFlagsImpl,
|
||||
max_readers: Option<usize>,
|
||||
max_dbs: Option<usize>,
|
||||
map_size: Option<usize>,
|
||||
) -> Result<EnvironmentImpl, ErrorImpl> {
|
||||
if !flags.is_empty() {
|
||||
warn!("Ignoring `flags={:?}`", flags);
|
||||
}
|
||||
if let Some(max_readers) = max_readers {
|
||||
warn!("Ignoring `max_readers={}`", max_readers);
|
||||
}
|
||||
if let Some(map_size) = map_size {
|
||||
warn!("Ignoring `map_size={}`", map_size);
|
||||
}
|
||||
|
||||
Ok(EnvironmentImpl {
|
||||
path: path.to_path_buf(),
|
||||
max_dbs: max_dbs.unwrap_or(std::usize::MAX),
|
||||
arena: RwLock::new(DatabaseArena::new()),
|
||||
dbs: RwLock::new(HashMap::new()),
|
||||
ro_txns: Arc::new(()),
|
||||
rw_txns: Arc::new(()),
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) fn read_from_disk(&mut self) -> Result<(), ErrorImpl> {
|
||||
let mut path = Cow::from(&self.path);
|
||||
if fs::metadata(&path)?.is_dir() {
|
||||
path.to_mut().push(DEFAULT_DB_FILENAME);
|
||||
};
|
||||
if fs::metadata(&path).is_err() {
|
||||
return Ok(());
|
||||
};
|
||||
let (arena, dbs) = Self::deserialize(&fs::read(&path)?)?;
|
||||
self.arena = RwLock::new(arena);
|
||||
self.dbs = RwLock::new(dbs);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) fn write_to_disk(&self) -> Result<(), ErrorImpl> {
|
||||
let mut path = Cow::from(&self.path);
|
||||
if fs::metadata(&path)?.is_dir() {
|
||||
path.to_mut().push(DEFAULT_DB_FILENAME);
|
||||
};
|
||||
fs::write(&path, self.serialize()?)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) fn dbs(&self) -> Result<RwLockReadGuard<DatabaseArena>, ErrorImpl> {
|
||||
self.arena.read().map_err(|_| ErrorImpl::EnvPoisonError)
|
||||
}
|
||||
|
||||
pub(crate) fn dbs_mut(&self) -> Result<RwLockWriteGuard<DatabaseArena>, ErrorImpl> {
|
||||
self.arena.write().map_err(|_| ErrorImpl::EnvPoisonError)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'e> BackendEnvironment<'e> for EnvironmentImpl {
|
||||
type Database = DatabaseImpl;
|
||||
type Error = ErrorImpl;
|
||||
type Flags = DatabaseFlagsImpl;
|
||||
type Info = InfoImpl;
|
||||
type RoTransaction = RoTransactionImpl<'e>;
|
||||
type RwTransaction = RwTransactionImpl<'e>;
|
||||
type Stat = StatImpl;
|
||||
|
||||
fn get_dbs(&self) -> Result<Vec<Option<String>>, Self::Error> {
|
||||
let dbs = self.dbs.read().map_err(|_| ErrorImpl::EnvPoisonError)?;
|
||||
Ok(dbs.keys().map(|key| key.to_owned()).collect())
|
||||
}
|
||||
|
||||
fn open_db(&self, name: Option<&str>) -> Result<Self::Database, Self::Error> {
|
||||
if Arc::strong_count(&self.ro_txns) > 1 {
|
||||
return Err(ErrorImpl::DbsIllegalOpen);
|
||||
}
|
||||
// TOOD: don't reallocate `name`.
|
||||
let key = name.map(String::from);
|
||||
let dbs = self.dbs.read().map_err(|_| ErrorImpl::EnvPoisonError)?;
|
||||
let id = dbs.get(&key).ok_or(ErrorImpl::DbNotFoundError)?;
|
||||
Ok(*id)
|
||||
}
|
||||
|
||||
fn create_db(&self, name: Option<&str>, flags: Self::Flags) -> Result<Self::Database, Self::Error> {
|
||||
if Arc::strong_count(&self.ro_txns) > 1 {
|
||||
return Err(ErrorImpl::DbsIllegalOpen);
|
||||
}
|
||||
// TOOD: don't reallocate `name`.
|
||||
let key = name.map(String::from);
|
||||
let mut dbs = self.dbs.write().map_err(|_| ErrorImpl::EnvPoisonError)?;
|
||||
let mut arena = self.arena.write().map_err(|_| ErrorImpl::EnvPoisonError)?;
|
||||
if dbs.keys().filter_map(|k| k.as_ref()).count() >= self.max_dbs && name != None {
|
||||
return Err(ErrorImpl::DbsFull);
|
||||
}
|
||||
let id = dbs.entry(key).or_insert_with(|| DatabaseImpl(arena.alloc(Database::new(Some(flags), None))));
|
||||
Ok(*id)
|
||||
}
|
||||
|
||||
fn begin_ro_txn(&'e self) -> Result<Self::RoTransaction, Self::Error> {
|
||||
RoTransactionImpl::new(self, self.ro_txns.clone())
|
||||
}
|
||||
|
||||
fn begin_rw_txn(&'e self) -> Result<Self::RwTransaction, Self::Error> {
|
||||
RwTransactionImpl::new(self, self.rw_txns.clone())
|
||||
}
|
||||
|
||||
fn sync(&self, force: bool) -> Result<(), Self::Error> {
|
||||
warn!("Ignoring `force={}`", force);
|
||||
self.write_to_disk()
|
||||
}
|
||||
|
||||
fn stat(&self) -> Result<Self::Stat, Self::Error> {
|
||||
Ok(StatImpl)
|
||||
}
|
||||
|
||||
fn info(&self) -> Result<Self::Info, Self::Error> {
|
||||
Ok(InfoImpl)
|
||||
}
|
||||
|
||||
fn freelist(&self) -> Result<usize, Self::Error> {
|
||||
unimplemented!()
|
||||
}
|
||||
|
||||
fn load_ratio(&self) -> Result<Option<f32>, Self::Error> {
|
||||
warn!("`load_ratio()` is irrelevant for this storage backend.");
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
fn set_map_size(&self, size: usize) -> Result<(), Self::Error> {
|
||||
warn!("`set_map_size({})` is ignored by this storage backend.", size);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn get_files_on_disk(&self) -> Vec<PathBuf> {
|
||||
// Technically NO_SUB_DIR and NO_LOCK should change this output, but
|
||||
// they're both currently unimplemented with this storage backend.
|
||||
let mut db_filename = self.path.clone();
|
||||
db_filename.push(DEFAULT_DB_FILENAME);
|
||||
return vec![db_filename];
|
||||
}
|
||||
}
|
|
@ -0,0 +1,82 @@
|
|||
// Copyright 2018-2019 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
use std::{
|
||||
fmt,
|
||||
io,
|
||||
path::PathBuf,
|
||||
};
|
||||
|
||||
use bincode::Error as BincodeError;
|
||||
|
||||
use crate::{
|
||||
backend::traits::BackendError,
|
||||
error::StoreError,
|
||||
};
|
||||
|
||||
#[derive(Debug)]
pub enum ErrorImpl {
    // The requested key (or key/value pair) is not in the database.
    KeyValuePairNotFound,
    // An environment `RwLock` was poisoned by a panicking thread.
    EnvPoisonError,
    // The configured maximum number of named databases has been reached.
    DbsFull,
    // Databases may not be opened or created while transactions are live.
    DbsIllegalOpen,
    // `open_db` was called with a name that was never created.
    DbNotFoundError,
    // A database handle that does not belong to this environment or
    // transaction was passed in.
    DbIsForeignError,
    // The path given to `open` is not usable as an environment location.
    UnsuitableEnvironmentPath(PathBuf),
    // Wrapped filesystem error.
    IoError(io::Error),
    // Wrapped (de)serialization error; the database file is invalid.
    BincodeError(BincodeError),
}

impl BackendError for ErrorImpl {}
|
||||
|
||||
impl fmt::Display for ErrorImpl {
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
match self {
|
||||
ErrorImpl::KeyValuePairNotFound => write!(fmt, "KeyValuePairNotFound (safe mode)"),
|
||||
ErrorImpl::EnvPoisonError => write!(fmt, "EnvPoisonError (safe mode)"),
|
||||
ErrorImpl::DbsFull => write!(fmt, "DbsFull (safe mode)"),
|
||||
ErrorImpl::DbsIllegalOpen => write!(fmt, "DbIllegalOpen (safe mode)"),
|
||||
ErrorImpl::DbNotFoundError => write!(fmt, "DbNotFoundError (safe mode)"),
|
||||
ErrorImpl::DbIsForeignError => write!(fmt, "DbIsForeignError (safe mode)"),
|
||||
ErrorImpl::UnsuitableEnvironmentPath(_) => write!(fmt, "UnsuitableEnvironmentPath (safe mode)"),
|
||||
ErrorImpl::IoError(e) => e.fmt(fmt),
|
||||
ErrorImpl::BincodeError(e) => e.fmt(fmt),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<StoreError> for ErrorImpl {
|
||||
fn into(self) -> StoreError {
|
||||
// The `StoreError::KeyValuePairBadSize` error is unused, because this
|
||||
// backend supports keys and values of arbitrary sizes.
|
||||
// The `StoreError::MapFull` and `StoreError::ReadersFull` are
|
||||
// unimplemented yet, but they should be in the future.
|
||||
match self {
|
||||
ErrorImpl::KeyValuePairNotFound => StoreError::KeyValuePairNotFound,
|
||||
ErrorImpl::BincodeError(_) => StoreError::FileInvalid,
|
||||
ErrorImpl::DbsFull => StoreError::DbsFull,
|
||||
ErrorImpl::UnsuitableEnvironmentPath(path) => StoreError::UnsuitableEnvironmentPath(path),
|
||||
ErrorImpl::IoError(error) => StoreError::IoError(error),
|
||||
_ => StoreError::SafeModeError(self),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<io::Error> for ErrorImpl {
|
||||
fn from(e: io::Error) -> ErrorImpl {
|
||||
ErrorImpl::IoError(e)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<BincodeError> for ErrorImpl {
|
||||
fn from(e: BincodeError) -> ErrorImpl {
|
||||
ErrorImpl::BincodeError(e)
|
||||
}
|
||||
}
|
|
@ -0,0 +1,136 @@
|
|||
// Copyright 2018-2019 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
use bitflags::bitflags;
|
||||
use serde_derive::{
|
||||
Deserialize,
|
||||
Serialize,
|
||||
};
|
||||
|
||||
use crate::backend::{
|
||||
common::{
|
||||
DatabaseFlags,
|
||||
EnvironmentFlags,
|
||||
WriteFlags,
|
||||
},
|
||||
traits::{
|
||||
BackendDatabaseFlags,
|
||||
BackendEnvironmentFlags,
|
||||
BackendFlags,
|
||||
BackendWriteFlags,
|
||||
},
|
||||
};
|
||||
|
||||
bitflags! {
|
||||
#[derive(Default, Serialize, Deserialize)]
|
||||
pub struct EnvironmentFlagsImpl: u32 {
|
||||
const NIL = 0b0000_0000;
|
||||
}
|
||||
}
|
||||
|
||||
impl BackendFlags for EnvironmentFlagsImpl {
|
||||
fn empty() -> EnvironmentFlagsImpl {
|
||||
EnvironmentFlagsImpl::empty()
|
||||
}
|
||||
}
|
||||
|
||||
impl BackendEnvironmentFlags for EnvironmentFlagsImpl {
|
||||
fn set(&mut self, flag: EnvironmentFlags, value: bool) {
|
||||
self.set(flag.into(), value)
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<EnvironmentFlagsImpl> for EnvironmentFlags {
|
||||
fn into(self) -> EnvironmentFlagsImpl {
|
||||
match self {
|
||||
EnvironmentFlags::FIXED_MAP => unimplemented!(),
|
||||
EnvironmentFlags::NO_SUB_DIR => unimplemented!(),
|
||||
EnvironmentFlags::WRITE_MAP => unimplemented!(),
|
||||
EnvironmentFlags::READ_ONLY => unimplemented!(),
|
||||
EnvironmentFlags::NO_META_SYNC => unimplemented!(),
|
||||
EnvironmentFlags::NO_SYNC => unimplemented!(),
|
||||
EnvironmentFlags::MAP_ASYNC => unimplemented!(),
|
||||
EnvironmentFlags::NO_TLS => unimplemented!(),
|
||||
EnvironmentFlags::NO_LOCK => unimplemented!(),
|
||||
EnvironmentFlags::NO_READAHEAD => unimplemented!(),
|
||||
EnvironmentFlags::NO_MEM_INIT => unimplemented!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bitflags! {
|
||||
#[derive(Default, Serialize, Deserialize)]
|
||||
pub struct DatabaseFlagsImpl: u32 {
|
||||
const NIL = 0b0000_0000;
|
||||
#[cfg(feature = "db-dup-sort")]
|
||||
const DUP_SORT = 0b0000_0001;
|
||||
#[cfg(feature = "db-int-key")]
|
||||
const INTEGER_KEY = 0b0000_0010;
|
||||
}
|
||||
}
|
||||
|
||||
impl BackendFlags for DatabaseFlagsImpl {
|
||||
fn empty() -> DatabaseFlagsImpl {
|
||||
DatabaseFlagsImpl::empty()
|
||||
}
|
||||
}
|
||||
|
||||
impl BackendDatabaseFlags for DatabaseFlagsImpl {
|
||||
fn set(&mut self, flag: DatabaseFlags, value: bool) {
|
||||
self.set(flag.into(), value)
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<DatabaseFlagsImpl> for DatabaseFlags {
|
||||
fn into(self) -> DatabaseFlagsImpl {
|
||||
match self {
|
||||
DatabaseFlags::REVERSE_KEY => unimplemented!(),
|
||||
#[cfg(feature = "db-dup-sort")]
|
||||
DatabaseFlags::DUP_SORT => DatabaseFlagsImpl::DUP_SORT,
|
||||
#[cfg(feature = "db-dup-sort")]
|
||||
DatabaseFlags::DUP_FIXED => unimplemented!(),
|
||||
#[cfg(feature = "db-int-key")]
|
||||
DatabaseFlags::INTEGER_KEY => DatabaseFlagsImpl::INTEGER_KEY,
|
||||
DatabaseFlags::INTEGER_DUP => unimplemented!(),
|
||||
DatabaseFlags::REVERSE_DUP => unimplemented!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bitflags! {
|
||||
#[derive(Default, Serialize, Deserialize)]
|
||||
pub struct WriteFlagsImpl: u32 {
|
||||
const NIL = 0b0000_0000;
|
||||
}
|
||||
}
|
||||
|
||||
impl BackendFlags for WriteFlagsImpl {
|
||||
fn empty() -> WriteFlagsImpl {
|
||||
WriteFlagsImpl::empty()
|
||||
}
|
||||
}
|
||||
|
||||
impl BackendWriteFlags for WriteFlagsImpl {
|
||||
fn set(&mut self, flag: WriteFlags, value: bool) {
|
||||
self.set(flag.into(), value)
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<WriteFlagsImpl> for WriteFlags {
|
||||
fn into(self) -> WriteFlagsImpl {
|
||||
match self {
|
||||
WriteFlags::NO_OVERWRITE => unimplemented!(),
|
||||
WriteFlags::NO_DUP_DATA => unimplemented!(),
|
||||
WriteFlags::CURRENT => unimplemented!(),
|
||||
WriteFlags::APPEND => unimplemented!(),
|
||||
WriteFlags::APPEND_DUP => unimplemented!(),
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,35 @@
|
|||
// Copyright 2018-2019 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
use crate::backend::traits::BackendInfo;
|
||||
|
||||
// Placeholder info type: environment info (map size, reader table, etc.) is
// an LMDB concept with no equivalent in this backend, so every accessor is
// unimplemented.
pub struct InfoImpl;

impl BackendInfo for InfoImpl {
    fn map_size(&self) -> usize {
        unimplemented!()
    }

    fn last_pgno(&self) -> usize {
        unimplemented!()
    }

    fn last_txnid(&self) -> usize {
        unimplemented!()
    }

    fn max_readers(&self) -> usize {
        unimplemented!()
    }

    fn num_readers(&self) -> usize {
        unimplemented!()
    }
}
|
|
@ -0,0 +1,24 @@
|
|||
// Copyright 2018-2019 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
use super::ErrorImpl;
|
||||
use crate::backend::traits::BackendIter;
|
||||
|
||||
// FIXME: Use generics instead.
// Wraps a boxed iterator over borrowed key/value pairs; boxing erases the
// concrete iterator type at the cost of one heap allocation per cursor.
pub struct IterImpl<'i>(pub(crate) Box<dyn Iterator<Item = (&'i [u8], &'i [u8])> + 'i>);

impl<'i> BackendIter<'i> for IterImpl<'i> {
    type Error = ErrorImpl;

    // Iterating an in-memory snapshot cannot fail, so every item is wrapped
    // in `Ok` purely to satisfy the fallible trait signature.
    #[allow(clippy::type_complexity)]
    fn next(&mut self) -> Option<Result<(&'i [u8], &'i [u8]), Self::Error>> {
        self.0.next().map(Ok)
    }
}
|
|
@ -0,0 +1,140 @@
|
|||
// Copyright 2018-2019 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
use std::{
|
||||
collections::{
|
||||
BTreeMap,
|
||||
BTreeSet,
|
||||
},
|
||||
sync::Arc,
|
||||
};
|
||||
|
||||
use serde_derive::{
|
||||
Deserialize,
|
||||
Serialize,
|
||||
};
|
||||
|
||||
use super::DatabaseFlagsImpl;
|
||||
|
||||
type Key = Box<[u8]>;
|
||||
type Value = Box<[u8]>;
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Snapshot {
    // Database flags captured when the snapshot was created.
    flags: DatabaseFlagsImpl,
    // Key/value storage. The `Arc` makes cloning a snapshot cheap; writers
    // use `Arc::make_mut`, so data is copied only on the first mutation.
    #[cfg(not(feature = "db-dup-sort"))]
    map: Arc<BTreeMap<Key, Value>>,
    // With dup-sort support, each key maps to an ordered set of values.
    #[cfg(feature = "db-dup-sort")]
    map: Arc<BTreeMap<Key, BTreeSet<Value>>>,
}
|
||||
|
||||
impl Snapshot {
|
||||
pub(crate) fn new(flags: Option<DatabaseFlagsImpl>) -> Snapshot {
|
||||
Snapshot {
|
||||
flags: flags.unwrap_or_else(DatabaseFlagsImpl::default),
|
||||
map: Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn flags(&self) -> &DatabaseFlagsImpl {
|
||||
&self.flags
|
||||
}
|
||||
|
||||
pub(crate) fn clear(&mut self) {
|
||||
self.map = Default::default();
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "db-dup-sort"))]
|
||||
impl Snapshot {
|
||||
pub(crate) fn get(&self, key: &[u8]) -> Option<&[u8]> {
|
||||
self.map.get(key).map(|value| value.as_ref())
|
||||
}
|
||||
|
||||
pub(crate) fn put(&mut self, key: &[u8], value: &[u8]) {
|
||||
let map = Arc::make_mut(&mut self.map);
|
||||
map.insert(Box::from(key), Box::from(value));
|
||||
}
|
||||
|
||||
pub(crate) fn del(&mut self, key: &[u8]) -> Option<()> {
|
||||
let map = Arc::make_mut(&mut self.map);
|
||||
map.remove(key).map(|_| ())
|
||||
}
|
||||
|
||||
pub(crate) fn iter(&self) -> impl Iterator<Item = (&[u8], &[u8])> {
|
||||
self.map.iter().map(|(key, value)| (key.as_ref(), value.as_ref()))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "db-dup-sort")]
impl Snapshot {
    /// Returns the first (smallest) value stored under `key`, if any.
    pub(crate) fn get(&self, key: &[u8]) -> Option<&[u8]> {
        let values = self.map.get(key)?;
        values.iter().next().map(|value| &**value)
    }

    /// Replaces whatever was stored under `key` with the single `value`.
    pub(crate) fn put(&mut self, key: &[u8], value: &[u8]) {
        let map = Arc::make_mut(&mut self.map);
        if let Some(values) = map.get_mut(key) {
            values.clear();
            values.insert(Box::from(value));
        } else {
            let mut values = BTreeSet::new();
            values.insert(Box::from(value));
            map.insert(Box::from(key), values);
        }
    }

    /// Removes every value stored under `key`; `None` if nothing was there.
    pub(crate) fn del(&mut self, key: &[u8]) -> Option<()> {
        let map = Arc::make_mut(&mut self.map);
        let values = map.get_mut(key)?;
        if values.is_empty() {
            None
        } else {
            values.clear();
            Some(())
        }
    }

    /// Iterates keys in order, each paired with an iterator over its values.
    pub(crate) fn iter(&self) -> impl Iterator<Item = (&[u8], impl Iterator<Item = &[u8]>)> {
        self.map.iter().map(|(key, values)| (&**key, values.iter().map(|value| &**value)))
    }
}

#[cfg(feature = "db-dup-sort")]
impl Snapshot {
    /// Adds `value` under `key`, keeping any values already stored there.
    pub(crate) fn put_dup(&mut self, key: &[u8], value: &[u8]) {
        let map = Arc::make_mut(&mut self.map);
        if let Some(values) = map.get_mut(key) {
            values.insert(Box::from(value));
        } else {
            let mut values = BTreeSet::new();
            values.insert(Box::from(value));
            map.insert(Box::from(key), values);
        }
    }

    /// Removes exactly `value` from under `key`; `None` if it wasn't there.
    pub(crate) fn del_exact(&mut self, key: &[u8], value: &[u8]) -> Option<()> {
        let map = Arc::make_mut(&mut self.map);
        let values = map.get_mut(key)?;
        if values.remove(value) {
            Some(())
        } else {
            None
        }
    }
}
|
|
@ -0,0 +1,39 @@
|
|||
// Copyright 2018-2019 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
use crate::backend::traits::BackendStat;
|
||||
|
||||
pub struct StatImpl;
|
||||
|
||||
impl BackendStat for StatImpl {
|
||||
fn page_size(&self) -> usize {
|
||||
unimplemented!()
|
||||
}
|
||||
|
||||
fn depth(&self) -> usize {
|
||||
unimplemented!()
|
||||
}
|
||||
|
||||
fn branch_pages(&self) -> usize {
|
||||
unimplemented!()
|
||||
}
|
||||
|
||||
fn leaf_pages(&self) -> usize {
|
||||
unimplemented!()
|
||||
}
|
||||
|
||||
fn overflow_pages(&self) -> usize {
|
||||
unimplemented!()
|
||||
}
|
||||
|
||||
fn entries(&self) -> usize {
|
||||
unimplemented!()
|
||||
}
|
||||
}
|
|
@ -0,0 +1,167 @@
|
|||
// Copyright 2018-2019 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
sync::Arc,
|
||||
};
|
||||
|
||||
use super::{
|
||||
snapshot::Snapshot,
|
||||
DatabaseImpl,
|
||||
EnvironmentImpl,
|
||||
ErrorImpl,
|
||||
RoCursorImpl,
|
||||
WriteFlagsImpl,
|
||||
};
|
||||
use crate::backend::traits::{
|
||||
BackendRoCursorTransaction,
|
||||
BackendRoTransaction,
|
||||
BackendRwCursorTransaction,
|
||||
BackendRwTransaction,
|
||||
};
|
||||
|
||||
#[derive(Debug)]
pub struct RoTransactionImpl<'t> {
    // Environment this transaction reads from.
    env: &'t EnvironmentImpl,
    // Per-database snapshots taken when the transaction began; all reads
    // are served from these, isolating the transaction from later writes.
    snapshots: HashMap<DatabaseImpl, Snapshot>,
    // Clone of the environment's `ro_txns` token; its `Arc` strong count
    // is how the environment tracks live read transactions.
    idx: Arc<()>,
}

impl<'t> RoTransactionImpl<'t> {
    // Snapshots every open database at construction time.
    pub(crate) fn new(env: &'t EnvironmentImpl, idx: Arc<()>) -> Result<RoTransactionImpl<'t>, ErrorImpl> {
        let snapshots = env.dbs()?.iter().map(|(id, db)| (DatabaseImpl(id), db.snapshot())).collect();
        Ok(RoTransactionImpl {
            env,
            snapshots,
            idx,
        })
    }
}
|
||||
|
||||
impl<'t> BackendRoTransaction for RoTransactionImpl<'t> {
|
||||
type Database = DatabaseImpl;
|
||||
type Error = ErrorImpl;
|
||||
|
||||
fn get(&self, db: &Self::Database, key: &[u8]) -> Result<&[u8], Self::Error> {
|
||||
let snapshot = self.snapshots.get(db).ok_or_else(|| ErrorImpl::DbIsForeignError)?;
|
||||
snapshot.get(key).ok_or_else(|| ErrorImpl::KeyValuePairNotFound)
|
||||
}
|
||||
|
||||
fn abort(self) {
|
||||
// noop
|
||||
}
|
||||
}
|
||||
|
||||
impl<'t> BackendRoCursorTransaction<'t> for RoTransactionImpl<'t> {
|
||||
type RoCursor = RoCursorImpl<'t>;
|
||||
|
||||
fn open_ro_cursor(&'t self, db: &Self::Database) -> Result<Self::RoCursor, Self::Error> {
|
||||
let snapshot = self.snapshots.get(db).ok_or_else(|| ErrorImpl::DbIsForeignError)?;
|
||||
Ok(RoCursorImpl(snapshot))
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
pub struct RwTransactionImpl<'t> {
    // Environment this transaction writes back into on commit.
    env: &'t EnvironmentImpl,
    // Private copies of every database's snapshot; writes mutate these and
    // become visible to others only when `commit` installs them.
    snapshots: HashMap<DatabaseImpl, Snapshot>,
    // Clone of the environment's `rw_txns` token; its `Arc` strong count
    // is how the environment tracks live write transactions.
    idx: Arc<()>,
}

impl<'t> RwTransactionImpl<'t> {
    // Snapshots every open database at construction time.
    pub(crate) fn new(env: &'t EnvironmentImpl, idx: Arc<()>) -> Result<RwTransactionImpl<'t>, ErrorImpl> {
        let snapshots = env.dbs()?.iter().map(|(id, db)| (DatabaseImpl(id), db.snapshot())).collect();
        Ok(RwTransactionImpl {
            env,
            snapshots,
            idx,
        })
    }
}
|
||||
|
||||
impl<'t> BackendRwTransaction for RwTransactionImpl<'t> {
|
||||
type Database = DatabaseImpl;
|
||||
type Error = ErrorImpl;
|
||||
type Flags = WriteFlagsImpl;
|
||||
|
||||
fn get(&self, db: &Self::Database, key: &[u8]) -> Result<&[u8], Self::Error> {
|
||||
let snapshot = self.snapshots.get(db).ok_or_else(|| ErrorImpl::DbIsForeignError)?;
|
||||
snapshot.get(key).ok_or_else(|| ErrorImpl::KeyValuePairNotFound)
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "db-dup-sort"))]
|
||||
fn put(&mut self, db: &Self::Database, key: &[u8], value: &[u8], _flags: Self::Flags) -> Result<(), Self::Error> {
|
||||
let snapshot = self.snapshots.get_mut(db).ok_or_else(|| ErrorImpl::DbIsForeignError)?;
|
||||
snapshot.put(key, value);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(feature = "db-dup-sort")]
|
||||
fn put(&mut self, db: &Self::Database, key: &[u8], value: &[u8], _flags: Self::Flags) -> Result<(), Self::Error> {
|
||||
use super::DatabaseFlagsImpl;
|
||||
let snapshot = self.snapshots.get_mut(db).ok_or_else(|| ErrorImpl::DbIsForeignError)?;
|
||||
if snapshot.flags().contains(DatabaseFlagsImpl::DUP_SORT) {
|
||||
snapshot.put_dup(key, value);
|
||||
} else {
|
||||
snapshot.put(key, value);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "db-dup-sort"))]
|
||||
fn del(&mut self, db: &Self::Database, key: &[u8]) -> Result<(), Self::Error> {
|
||||
let snapshot = self.snapshots.get_mut(db).ok_or_else(|| ErrorImpl::DbIsForeignError)?;
|
||||
let deleted = snapshot.del(key);
|
||||
Ok(deleted.ok_or_else(|| ErrorImpl::KeyValuePairNotFound)?)
|
||||
}
|
||||
|
||||
#[cfg(feature = "db-dup-sort")]
|
||||
fn del(&mut self, db: &Self::Database, key: &[u8], value: Option<&[u8]>) -> Result<(), Self::Error> {
|
||||
use super::DatabaseFlagsImpl;
|
||||
let snapshot = self.snapshots.get_mut(db).ok_or_else(|| ErrorImpl::DbIsForeignError)?;
|
||||
let deleted = match (value, snapshot.flags()) {
|
||||
(Some(value), flags) if flags.contains(DatabaseFlagsImpl::DUP_SORT) => snapshot.del_exact(key, value),
|
||||
_ => snapshot.del(key),
|
||||
};
|
||||
Ok(deleted.ok_or_else(|| ErrorImpl::KeyValuePairNotFound)?)
|
||||
}
|
||||
|
||||
fn clear_db(&mut self, db: &Self::Database) -> Result<(), Self::Error> {
|
||||
let snapshot = self.snapshots.get_mut(db).ok_or_else(|| ErrorImpl::DbIsForeignError)?;
|
||||
snapshot.clear();
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn commit(self) -> Result<(), Self::Error> {
|
||||
let mut dbs = self.env.dbs_mut()?;
|
||||
|
||||
for (id, snapshot) in self.snapshots {
|
||||
let db = dbs.get_mut(id.0).ok_or_else(|| ErrorImpl::DbIsForeignError)?;
|
||||
db.replace(snapshot);
|
||||
}
|
||||
|
||||
drop(dbs);
|
||||
self.env.write_to_disk()
|
||||
}
|
||||
|
||||
fn abort(self) {
|
||||
// noop
|
||||
}
|
||||
}
|
||||
|
||||
impl<'t> BackendRwCursorTransaction<'t> for RwTransactionImpl<'t> {
|
||||
type RoCursor = RoCursorImpl<'t>;
|
||||
|
||||
fn open_ro_cursor(&'t self, db: &Self::Database) -> Result<Self::RoCursor, Self::Error> {
|
||||
let snapshot = self.snapshots.get(db).ok_or_else(|| ErrorImpl::DbIsForeignError)?;
|
||||
Ok(RoCursorImpl(snapshot))
|
||||
}
|
||||
}
|
|
@ -0,0 +1,195 @@
|
|||
// Copyright 2018-2019 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
use std::{
|
||||
fmt::{
|
||||
Debug,
|
||||
Display,
|
||||
},
|
||||
path::{
|
||||
Path,
|
||||
PathBuf,
|
||||
},
|
||||
};
|
||||
|
||||
use crate::{
|
||||
backend::common::{
|
||||
DatabaseFlags,
|
||||
EnvironmentFlags,
|
||||
WriteFlags,
|
||||
},
|
||||
error::StoreError,
|
||||
};
|
||||
|
||||
/// Errors reported by a backend; must be convertible into the public
/// `StoreError` type.
pub trait BackendError: Debug + Display + Into<StoreError> {}

/// A cheap, copyable handle identifying a database within an environment.
pub trait BackendDatabase: Debug + Eq + PartialEq + Copy + Clone {}

/// Common behavior for all backend flag types.
pub trait BackendFlags: Debug + Eq + PartialEq + Copy + Clone + Default {
    /// Returns the flag set with no flags enabled.
    fn empty() -> Self;
}

/// Flag set controlling how an environment is opened.
pub trait BackendEnvironmentFlags: BackendFlags {
    /// Enables (`value == true`) or disables the given flag.
    fn set(&mut self, flag: EnvironmentFlags, value: bool);
}

/// Flag set controlling database behavior.
pub trait BackendDatabaseFlags: BackendFlags {
    /// Enables (`value == true`) or disables the given flag.
    fn set(&mut self, flag: DatabaseFlags, value: bool);
}

/// Flag set controlling individual write operations.
pub trait BackendWriteFlags: BackendFlags {
    /// Enables (`value == true`) or disables the given flag.
    fn set(&mut self, flag: WriteFlags, value: bool);
}
|
||||
|
||||
/// Storage statistics (page geometry and entry counts); backends without
/// such concepts may leave these unimplemented.
pub trait BackendStat {
    fn page_size(&self) -> usize;

    fn depth(&self) -> usize;

    fn branch_pages(&self) -> usize;

    fn leaf_pages(&self) -> usize;

    fn overflow_pages(&self) -> usize;

    fn entries(&self) -> usize;
}

/// Runtime information about an environment (map size, transaction ids,
/// reader table usage).
pub trait BackendInfo {
    fn map_size(&self) -> usize;

    fn last_pgno(&self) -> usize;

    fn last_txnid(&self) -> usize;

    fn max_readers(&self) -> usize;

    fn num_readers(&self) -> usize;
}
|
||||
|
||||
/// Builder used to configure and then open a backend environment.
pub trait BackendEnvironmentBuilder<'b>: Debug + Eq + PartialEq + Copy + Clone {
    type Error: BackendError;
    type Environment: BackendEnvironment<'b>;
    type Flags: BackendEnvironmentFlags;

    /// Creates a builder with all settings at their defaults.
    fn new() -> Self;

    fn set_flags<T>(&mut self, flags: T) -> &mut Self
    where
        T: Into<Self::Flags>;

    fn set_max_dbs(&mut self, max_dbs: u32) -> &mut Self;

    fn set_max_readers(&mut self, max_readers: u32) -> &mut Self;

    fn set_map_size(&mut self, size: usize) -> &mut Self;

    /// Whether `open` may create the environment directory when missing.
    fn set_make_dir_if_needed(&mut self, make_dir: bool) -> &mut Self;

    /// Opens the environment rooted at `path` with the configured settings.
    fn open(&self, path: &Path) -> Result<Self::Environment, Self::Error>;
}
|
||||
|
||||
/// A storage environment: a collection of named databases plus the
/// transactions that read and write them.
pub trait BackendEnvironment<'e>: Debug {
    type Error: BackendError;
    type Database: BackendDatabase;
    type Flags: BackendDatabaseFlags;
    type Stat: BackendStat;
    type Info: BackendInfo;
    type RoTransaction: BackendRoCursorTransaction<'e, Database = Self::Database>;
    type RwTransaction: BackendRwCursorTransaction<'e, Database = Self::Database>;

    /// Lists all database names; `None` denotes the unnamed database.
    fn get_dbs(&self) -> Result<Vec<Option<String>>, Self::Error>;

    /// Returns a handle to an existing database.
    fn open_db(&self, name: Option<&str>) -> Result<Self::Database, Self::Error>;

    /// Returns a handle to a database, creating it if necessary.
    fn create_db(&self, name: Option<&str>, flags: Self::Flags) -> Result<Self::Database, Self::Error>;

    fn begin_ro_txn(&'e self) -> Result<Self::RoTransaction, Self::Error>;

    fn begin_rw_txn(&'e self) -> Result<Self::RwTransaction, Self::Error>;

    /// Flushes buffered writes to durable storage.
    fn sync(&self, force: bool) -> Result<(), Self::Error>;

    fn stat(&self) -> Result<Self::Stat, Self::Error>;

    fn info(&self) -> Result<Self::Info, Self::Error>;

    fn freelist(&self) -> Result<usize, Self::Error>;

    fn load_ratio(&self) -> Result<Option<f32>, Self::Error>;

    fn set_map_size(&self, size: usize) -> Result<(), Self::Error>;

    /// Paths of the files this environment stores its data in.
    fn get_files_on_disk(&self) -> Vec<PathBuf>;
}
|
||||
|
||||
/// A read-only transaction over a backend environment.
pub trait BackendRoTransaction: Debug {
    /// Backend-specific error type.
    type Error: BackendError;
    /// Database handle type this transaction operates on.
    type Database: BackendDatabase;

    /// Look up the raw value stored under `key` in `db`.
    fn get(&self, db: &Self::Database, key: &[u8]) -> Result<&[u8], Self::Error>;

    /// Abort the transaction, discarding it.
    fn abort(self);
}
|
||||
|
||||
/// A read-write transaction over a backend environment.
pub trait BackendRwTransaction: Debug {
    /// Backend-specific error type.
    type Error: BackendError;
    /// Database handle type this transaction operates on.
    type Database: BackendDatabase;
    /// Flags accepted by write operations.
    type Flags: BackendWriteFlags;

    /// Look up the raw value stored under `key` in `db`.
    fn get(&self, db: &Self::Database, key: &[u8]) -> Result<&[u8], Self::Error>;

    /// Store `value` under `key` in `db` with the given write flags.
    fn put(&mut self, db: &Self::Database, key: &[u8], value: &[u8], flags: Self::Flags) -> Result<(), Self::Error>;

    /// Delete the entry stored under `key` in `db`.
    #[cfg(not(feature = "db-dup-sort"))]
    fn del(&mut self, db: &Self::Database, key: &[u8]) -> Result<(), Self::Error>;

    /// Delete an entry under `key` in `db`. With dup-sort support, a
    /// `Some(value)` deletes only that duplicate; `None` deletes all values
    /// under the key.
    #[cfg(feature = "db-dup-sort")]
    fn del(&mut self, db: &Self::Database, key: &[u8], value: Option<&[u8]>) -> Result<(), Self::Error>;

    /// Remove every entry from `db`.
    fn clear_db(&mut self, db: &Self::Database) -> Result<(), Self::Error>;

    /// Commit the transaction, persisting its writes.
    fn commit(self) -> Result<(), Self::Error>;

    /// Abort the transaction, discarding its writes.
    fn abort(self);
}
|
||||
|
||||
/// A read-only transaction that can open read-only cursors.
pub trait BackendRoCursorTransaction<'t>: BackendRoTransaction {
    /// Cursor type produced by this transaction.
    type RoCursor: BackendRoCursor<'t>;

    /// Open a read-only cursor over `db`.
    fn open_ro_cursor(&'t self, db: &Self::Database) -> Result<Self::RoCursor, Self::Error>;
}
|
||||
|
||||
/// A read-write transaction that can open read-only cursors.
pub trait BackendRwCursorTransaction<'t>: BackendRwTransaction {
    /// Cursor type produced by this transaction.
    type RoCursor: BackendRoCursor<'t>;

    /// Open a read-only cursor over `db`.
    fn open_ro_cursor(&'t self, db: &Self::Database) -> Result<Self::RoCursor, Self::Error>;
}
|
||||
|
||||
/// A read-only cursor over a database, convertible into key/value iterators.
pub trait BackendRoCursor<'c>: Debug {
    /// Iterator type produced by this cursor.
    type Iter: BackendIter<'c>;

    /// Convert this cursor into an iterator over key/value pairs.
    fn into_iter(self) -> Self::Iter;

    /// Convert this cursor into an iterator positioned at `key`.
    /// (Positioning semantics for a missing key are backend-defined.)
    fn into_iter_from<K>(self, key: K) -> Self::Iter
    where
        K: AsRef<[u8]> + 'c;

    /// Convert this cursor into an iterator over the duplicate values stored
    /// under exactly `key`.
    fn into_iter_dup_of<K>(self, key: K) -> Self::Iter
    where
        K: AsRef<[u8]> + 'c;
}
|
||||
|
||||
/// An iterator over raw key/value pairs produced by a backend cursor.
pub trait BackendIter<'i> {
    /// Backend-specific error type.
    type Error: BackendError;

    /// Return the next key/value pair, or `None` when the iterator is
    /// exhausted.
    #[allow(clippy::type_complexity)]
    fn next(&mut self) -> Option<Result<(&'i [u8], &'i [u8]), Self::Error>>;
}
|
|
@ -0,0 +1,54 @@
|
|||
// Copyright 2018-2019 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
use std::{
|
||||
env::args,
|
||||
io,
|
||||
path::Path,
|
||||
};
|
||||
|
||||
use rkv::migrator::{
|
||||
LmdbArchMigrateError,
|
||||
LmdbArchMigrator,
|
||||
};
|
||||
|
||||
fn main() -> Result<(), LmdbArchMigrateError> {
|
||||
let mut cli_args = args();
|
||||
let mut db_name = None;
|
||||
let mut env_path = None;
|
||||
|
||||
// The first arg is the name of the program, which we can ignore.
|
||||
cli_args.next();
|
||||
|
||||
while let Some(arg) = cli_args.next() {
|
||||
if &arg[0..1] == "-" {
|
||||
match &arg[1..] {
|
||||
"s" => {
|
||||
db_name = match cli_args.next() {
|
||||
None => return Err("-s must be followed by database name".into()),
|
||||
Some(str) => Some(str),
|
||||
};
|
||||
},
|
||||
str => return Err(format!("arg -{} not recognized", str).into()),
|
||||
}
|
||||
} else {
|
||||
if env_path.is_some() {
|
||||
return Err("must provide only one path to the LMDB environment".into());
|
||||
}
|
||||
env_path = Some(arg);
|
||||
}
|
||||
}
|
||||
|
||||
let env_path = env_path.ok_or("must provide a path to the LMDB environment")?;
|
||||
let mut migrator = LmdbArchMigrator::new(Path::new(&env_path))?;
|
||||
migrator.dump(db_name.as_deref(), io::stdout()).unwrap();
|
||||
|
||||
Ok(())
|
||||
}
|
|
@ -0,0 +1,113 @@
|
|||
// Copyright 2018-2019 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
//! A command-line utility to create an LMDB environment containing random data.
|
||||
//! It requires one flag, `-s path/to/environment`, which specifies the location
|
||||
//! where the tool should create the environment. Optionally, you may specify
|
||||
//! the number of key/value pairs to create via the `-n <number>` flag
|
||||
//! (for which the default value is 50).
|
||||
|
||||
use std::{
|
||||
env::args,
|
||||
fs,
|
||||
fs::File,
|
||||
io::Read,
|
||||
path::Path,
|
||||
};
|
||||
|
||||
use rkv::{
|
||||
backend::{
|
||||
BackendEnvironmentBuilder,
|
||||
Lmdb,
|
||||
},
|
||||
Rkv,
|
||||
StoreOptions,
|
||||
Value,
|
||||
};
|
||||
|
||||
fn main() {
|
||||
let mut args = args();
|
||||
let mut database = None;
|
||||
let mut path = None;
|
||||
let mut num_pairs = 50;
|
||||
|
||||
// The first arg is the name of the program, which we can ignore.
|
||||
args.next();
|
||||
|
||||
while let Some(arg) = args.next() {
|
||||
if &arg[0..1] == "-" {
|
||||
match &arg[1..] {
|
||||
"s" => {
|
||||
database = match args.next() {
|
||||
None => panic!("-s must be followed by database arg"),
|
||||
Some(str) => Some(str),
|
||||
};
|
||||
},
|
||||
"n" => {
|
||||
num_pairs = match args.next() {
|
||||
None => panic!("-s must be followed by number of pairs"),
|
||||
Some(str) => str.parse().expect("number"),
|
||||
};
|
||||
},
|
||||
str => panic!("arg -{} not recognized", str),
|
||||
}
|
||||
} else {
|
||||
if path.is_some() {
|
||||
panic!("must provide only one path to the LMDB environment");
|
||||
}
|
||||
path = Some(arg);
|
||||
}
|
||||
}
|
||||
|
||||
if path.is_none() {
|
||||
panic!("must provide a path to the LMDB environment");
|
||||
}
|
||||
|
||||
let path = path.unwrap();
|
||||
fs::create_dir_all(&path).expect("dir created");
|
||||
|
||||
let mut builder = Rkv::environment_builder::<Lmdb>();
|
||||
builder.set_max_dbs(2);
|
||||
// Allocate enough map to accommodate the largest random collection.
|
||||
// We currently do this by allocating twice the maximum possible size
|
||||
// of the pairs (assuming maximum key and value sizes).
|
||||
builder.set_map_size((511 + 65535) * num_pairs * 2);
|
||||
let rkv = Rkv::from_builder(Path::new(&path), builder).expect("Rkv");
|
||||
let store = rkv.open_single(database.as_deref(), StoreOptions::create()).expect("opened");
|
||||
let mut writer = rkv.write().expect("writer");
|
||||
|
||||
// Generate random values for the number of keys and key/value lengths.
|
||||
// On Linux, "Just use /dev/urandom!" <https://www.2uo.de/myths-about-urandom/>.
|
||||
// On macOS it doesn't matter (/dev/random and /dev/urandom are identical).
|
||||
let mut random = File::open("/dev/urandom").unwrap();
|
||||
let mut nums = [0u8; 4];
|
||||
random.read_exact(&mut nums).unwrap();
|
||||
|
||||
// Generate 0–255 pairs.
|
||||
for _ in 0..num_pairs {
|
||||
// Generate key and value lengths. The key must be 1–511 bytes long.
|
||||
// The value length can be 0 and is essentially unbounded; we generate
|
||||
// value lengths of 0–0xffff (65535).
|
||||
// NB: the modulus method for generating a random number within a range
|
||||
// introduces distribution skew, but we don't need it to be perfect.
|
||||
let key_len = ((u16::from(nums[0]) + (u16::from(nums[1]) << 8)) % 511 + 1) as usize;
|
||||
let value_len = (u16::from(nums[2]) + (u16::from(nums[3]) << 8)) as usize;
|
||||
|
||||
let mut key: Vec<u8> = vec![0; key_len];
|
||||
random.read_exact(&mut key[0..key_len]).unwrap();
|
||||
|
||||
let mut value: Vec<u8> = vec![0; value_len];
|
||||
random.read_exact(&mut value[0..value_len]).unwrap();
|
||||
|
||||
store.put(&mut writer, key, &Value::Blob(&value)).expect("wrote");
|
||||
}
|
||||
|
||||
writer.commit().expect("committed");
|
||||
}
|
|
@ -0,0 +1,326 @@
|
|||
// Copyright 2018-2019 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
use std::{
|
||||
fs,
|
||||
os::raw::c_uint,
|
||||
path::{
|
||||
Path,
|
||||
PathBuf,
|
||||
},
|
||||
};
|
||||
|
||||
#[cfg(any(feature = "db-dup-sort", feature = "db-int-key"))]
|
||||
use crate::backend::{
|
||||
BackendDatabaseFlags,
|
||||
DatabaseFlags,
|
||||
};
|
||||
use crate::{
|
||||
backend::{
|
||||
BackendEnvironment,
|
||||
BackendEnvironmentBuilder,
|
||||
BackendRoCursorTransaction,
|
||||
BackendRwCursorTransaction,
|
||||
SafeModeError,
|
||||
},
|
||||
error::StoreError,
|
||||
readwrite::{
|
||||
Reader,
|
||||
Writer,
|
||||
},
|
||||
store::{
|
||||
single::SingleStore,
|
||||
Options as StoreOptions,
|
||||
},
|
||||
};
|
||||
|
||||
#[cfg(feature = "db-dup-sort")]
|
||||
use crate::store::multi::MultiStore;
|
||||
|
||||
#[cfg(feature = "db-int-key")]
|
||||
use crate::store::integer::IntegerStore;
|
||||
#[cfg(feature = "db-int-key")]
|
||||
use crate::store::keys::PrimitiveInt;
|
||||
|
||||
#[cfg(all(feature = "db-dup-sort", feature = "db-int-key"))]
|
||||
use crate::store::integermulti::MultiIntegerStore;
|
||||
|
||||
/// Default maximum number of named databases an environment supports when
/// created via `Rkv::new`.
pub static DEFAULT_MAX_DBS: c_uint = 5;

/// Wrapper around an `Environment` (e.g. such as an `LMDB` or `SafeMode` environment).
#[derive(Debug)]
pub struct Rkv<E> {
    // Filesystem path the environment was opened at.
    path: PathBuf,
    // The backend environment handle.
    env: E,
}
|
||||
|
||||
/// Static methods.
|
||||
impl<'e, E> Rkv<E>
|
||||
where
|
||||
E: BackendEnvironment<'e>,
|
||||
{
|
||||
pub fn environment_builder<B>() -> B
|
||||
where
|
||||
B: BackendEnvironmentBuilder<'e, Environment = E>,
|
||||
{
|
||||
B::new()
|
||||
}
|
||||
|
||||
/// Return a new Rkv environment that supports up to `DEFAULT_MAX_DBS` open databases.
|
||||
#[allow(clippy::new_ret_no_self)]
|
||||
pub fn new<B>(path: &Path) -> Result<Rkv<E>, StoreError>
|
||||
where
|
||||
B: BackendEnvironmentBuilder<'e, Environment = E>,
|
||||
{
|
||||
Rkv::with_capacity::<B>(path, DEFAULT_MAX_DBS)
|
||||
}
|
||||
|
||||
/// Return a new Rkv environment that supports the specified number of open databases.
|
||||
pub fn with_capacity<B>(path: &Path, max_dbs: c_uint) -> Result<Rkv<E>, StoreError>
|
||||
where
|
||||
B: BackendEnvironmentBuilder<'e, Environment = E>,
|
||||
{
|
||||
let mut builder = B::new();
|
||||
builder.set_max_dbs(max_dbs);
|
||||
|
||||
// Future: set flags, maximum size, etc. here if necessary.
|
||||
Rkv::from_builder(path, builder)
|
||||
}
|
||||
|
||||
/// Return a new Rkv environment from the provided builder.
|
||||
pub fn from_builder<B>(path: &Path, builder: B) -> Result<Rkv<E>, StoreError>
|
||||
where
|
||||
B: BackendEnvironmentBuilder<'e, Environment = E>,
|
||||
{
|
||||
Ok(Rkv {
|
||||
path: path.into(),
|
||||
env: builder.open(path).map_err(|e| e.into())?,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Store creation methods.
|
||||
impl<'e, E> Rkv<E>
|
||||
where
|
||||
E: BackendEnvironment<'e>,
|
||||
{
|
||||
/// Return all created databases.
|
||||
pub fn get_dbs(&self) -> Result<Vec<Option<String>>, StoreError> {
|
||||
self.env.get_dbs().map_err(|e| e.into())
|
||||
}
|
||||
|
||||
/// Create or Open an existing database in (&[u8] -> Single Value) mode.
|
||||
/// Note: that create=true cannot be called concurrently with other operations so if
|
||||
/// you are sure that the database exists, call this with create=false.
|
||||
pub fn open_single<'s, T>(
|
||||
&self,
|
||||
name: T,
|
||||
opts: StoreOptions<E::Flags>,
|
||||
) -> Result<SingleStore<E::Database>, StoreError>
|
||||
where
|
||||
T: Into<Option<&'s str>>,
|
||||
{
|
||||
self.open(name, opts).map(SingleStore::new)
|
||||
}
|
||||
|
||||
/// Create or Open an existing database in (Integer -> Single Value) mode.
|
||||
/// Note: that create=true cannot be called concurrently with other operations so if
|
||||
/// you are sure that the database exists, call this with create=false.
|
||||
#[cfg(feature = "db-int-key")]
|
||||
pub fn open_integer<'s, T, K>(
|
||||
&self,
|
||||
name: T,
|
||||
mut opts: StoreOptions<E::Flags>,
|
||||
) -> Result<IntegerStore<E::Database, K>, StoreError>
|
||||
where
|
||||
K: PrimitiveInt,
|
||||
T: Into<Option<&'s str>>,
|
||||
{
|
||||
opts.flags.set(DatabaseFlags::INTEGER_KEY, true);
|
||||
self.open(name, opts).map(IntegerStore::new)
|
||||
}
|
||||
|
||||
/// Create or Open an existing database in (&[u8] -> Multiple Values) mode.
|
||||
/// Note: that create=true cannot be called concurrently with other operations so if
|
||||
/// you are sure that the database exists, call this with create=false.
|
||||
#[cfg(feature = "db-dup-sort")]
|
||||
pub fn open_multi<'s, T>(
|
||||
&self,
|
||||
name: T,
|
||||
mut opts: StoreOptions<E::Flags>,
|
||||
) -> Result<MultiStore<E::Database>, StoreError>
|
||||
where
|
||||
T: Into<Option<&'s str>>,
|
||||
{
|
||||
opts.flags.set(DatabaseFlags::DUP_SORT, true);
|
||||
self.open(name, opts).map(MultiStore::new)
|
||||
}
|
||||
|
||||
/// Create or Open an existing database in (Integer -> Multiple Values) mode.
|
||||
/// Note: that create=true cannot be called concurrently with other operations so if
|
||||
/// you are sure that the database exists, call this with create=false.
|
||||
#[cfg(all(feature = "db-dup-sort", feature = "db-int-key"))]
|
||||
pub fn open_multi_integer<'s, T, K>(
|
||||
&self,
|
||||
name: T,
|
||||
mut opts: StoreOptions<E::Flags>,
|
||||
) -> Result<MultiIntegerStore<E::Database, K>, StoreError>
|
||||
where
|
||||
K: PrimitiveInt,
|
||||
T: Into<Option<&'s str>>,
|
||||
{
|
||||
opts.flags.set(DatabaseFlags::INTEGER_KEY, true);
|
||||
opts.flags.set(DatabaseFlags::DUP_SORT, true);
|
||||
self.open(name, opts).map(MultiIntegerStore::new)
|
||||
}
|
||||
|
||||
fn open<'s, T>(&self, name: T, opts: StoreOptions<E::Flags>) -> Result<E::Database, StoreError>
|
||||
where
|
||||
T: Into<Option<&'s str>>,
|
||||
{
|
||||
if opts.create {
|
||||
self.env.create_db(name.into(), opts.flags).map_err(|e| {
|
||||
match e.into() {
|
||||
StoreError::LmdbError(lmdb::Error::BadRslot) => StoreError::open_during_transaction(),
|
||||
StoreError::SafeModeError(SafeModeError::DbsIllegalOpen) => StoreError::open_during_transaction(),
|
||||
e => e,
|
||||
}
|
||||
})
|
||||
} else {
|
||||
self.env.open_db(name.into()).map_err(|e| {
|
||||
match e.into() {
|
||||
StoreError::LmdbError(lmdb::Error::BadRslot) => StoreError::open_during_transaction(),
|
||||
StoreError::SafeModeError(SafeModeError::DbsIllegalOpen) => StoreError::open_during_transaction(),
|
||||
e => e,
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Read and write accessors.
|
||||
impl<'e, E> Rkv<E>
|
||||
where
|
||||
E: BackendEnvironment<'e>,
|
||||
{
|
||||
/// Create a read transaction. There can be multiple concurrent readers for an
|
||||
/// environment, up to the maximum specified by LMDB (default 126), and you can open
|
||||
/// readers while a write transaction is active.
|
||||
pub fn read<T>(&'e self) -> Result<Reader<T>, StoreError>
|
||||
where
|
||||
E: BackendEnvironment<'e, RoTransaction = T>,
|
||||
T: BackendRoCursorTransaction<'e, Database = E::Database>,
|
||||
{
|
||||
Ok(Reader::new(self.env.begin_ro_txn().map_err(|e| e.into())?))
|
||||
}
|
||||
|
||||
/// Create a write transaction. There can be only one write transaction active at any
|
||||
/// given time, so trying to create a second one will block until the first is
|
||||
/// committed or aborted.
|
||||
pub fn write<T>(&'e self) -> Result<Writer<T>, StoreError>
|
||||
where
|
||||
E: BackendEnvironment<'e, RwTransaction = T>,
|
||||
T: BackendRwCursorTransaction<'e, Database = E::Database>,
|
||||
{
|
||||
Ok(Writer::new(self.env.begin_rw_txn().map_err(|e| e.into())?))
|
||||
}
|
||||
}
|
||||
|
||||
/// Other environment methods.
|
||||
impl<'e, E> Rkv<E>
|
||||
where
|
||||
E: BackendEnvironment<'e>,
|
||||
{
|
||||
/// Flush the data buffers to disk. This call is only useful, when the environment was
|
||||
/// open with either `NO_SYNC`, `NO_META_SYNC` or `MAP_ASYNC` (see below). The call is
|
||||
/// not valid if the environment was opened with `READ_ONLY`.
|
||||
///
|
||||
/// Data is always written to disk when `transaction.commit()` is called, but the
|
||||
/// operating system may keep it buffered. LMDB always flushes the OS buffers upon
|
||||
/// commit as well, unless the environment was opened with `NO_SYNC` or in part
|
||||
/// `NO_META_SYNC`.
|
||||
///
|
||||
/// `force`: if true, force a synchronous flush. Otherwise if the environment has the
|
||||
/// `NO_SYNC` flag set the flushes will be omitted, and with `MAP_ASYNC` they will
|
||||
/// be asynchronous.
|
||||
pub fn sync(&self, force: bool) -> Result<(), StoreError> {
|
||||
self.env.sync(force).map_err(|e| e.into())
|
||||
}
|
||||
|
||||
/// Retrieve statistics about this environment.
|
||||
///
|
||||
/// It includes:
|
||||
/// * Page size in bytes
|
||||
/// * B-tree depth
|
||||
/// * Number of internal (non-leaf) pages
|
||||
/// * Number of leaf pages
|
||||
/// * Number of overflow pages
|
||||
/// * Number of data entries
|
||||
pub fn stat(&self) -> Result<E::Stat, StoreError> {
|
||||
self.env.stat().map_err(|e| e.into())
|
||||
}
|
||||
|
||||
/// Retrieve information about this environment.
|
||||
///
|
||||
/// It includes:
|
||||
/// * Map size in bytes
|
||||
/// * The last used page number
|
||||
/// * The last transaction ID
|
||||
/// * Max number of readers allowed
|
||||
/// * Number of readers in use
|
||||
pub fn info(&self) -> Result<E::Info, StoreError> {
|
||||
self.env.info().map_err(|e| e.into())
|
||||
}
|
||||
|
||||
/// Retrieve the load ratio (# of used pages / total pages) about this environment.
|
||||
///
|
||||
/// With the formular: (last_page_no - freelist_pages) / total_pages.
|
||||
/// A value of `None` means that the backend doesn't ever need to be resized.
|
||||
pub fn load_ratio(&self) -> Result<Option<f32>, StoreError> {
|
||||
self.env.load_ratio().map_err(|e| e.into())
|
||||
}
|
||||
|
||||
/// Sets the size of the memory map to use for the environment.
|
||||
///
|
||||
/// This can be used to resize the map when the environment is already open. You can
|
||||
/// also use `Rkv::environment_builder()` to set the map size during the `Rkv`
|
||||
/// initialization.
|
||||
///
|
||||
/// Note:
|
||||
///
|
||||
/// * No active transactions allowed when performing resizing in this process. It's up
|
||||
/// to the consumer to enforce that.
|
||||
///
|
||||
/// * The size should be a multiple of the OS page size. Any attempt to set a size
|
||||
/// smaller than the space already consumed by the environment will be silently
|
||||
/// changed to the current size of the used space.
|
||||
///
|
||||
/// * In the multi-process case, once a process resizes the map, other processes need
|
||||
/// to either re-open the environment, or call set_map_size with size 0 to update
|
||||
/// the environment. Otherwise, new transaction creation will fail with
|
||||
/// `LmdbError::MapResized`.
|
||||
pub fn set_map_size(&self, size: usize) -> Result<(), StoreError> {
|
||||
self.env.set_map_size(size).map_err(Into::into)
|
||||
}
|
||||
|
||||
/// Closes this environment and deletes all its files from disk. Doesn't delete the
|
||||
/// folder used when opening the environment.
|
||||
pub fn close_and_delete(self) -> Result<(), StoreError> {
|
||||
let files = self.env.get_files_on_disk();
|
||||
self.sync(true)?;
|
||||
drop(self);
|
||||
|
||||
for file in files {
|
||||
fs::remove_file(file)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
|
@ -0,0 +1,159 @@
|
|||
// Copyright 2018-2019 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
use std::{
|
||||
io,
|
||||
path::PathBuf,
|
||||
str,
|
||||
sync,
|
||||
thread,
|
||||
thread::ThreadId,
|
||||
};
|
||||
|
||||
use failure::Fail;
|
||||
|
||||
pub use crate::backend::SafeModeError;
|
||||
use crate::value::Type;
|
||||
|
||||
/// Errors raised while encoding or decoding typed `Value`s.
#[derive(Debug, Fail)]
pub enum DataError {
    /// The leading type-tag byte did not match any known `Type`.
    #[fail(display = "unknown type tag: {}", _0)]
    UnknownType(u8),

    /// The value carried a valid tag, but not the one the caller expected.
    #[fail(display = "unexpected type tag: expected {}, got {}", expected, actual)]
    UnexpectedType {
        /// The type the caller asked for.
        expected: Type,
        /// The type actually found in storage.
        actual: Type,
    },

    /// The stored byte slice was empty, so no tag could be read.
    #[fail(display = "empty data; expected tag")]
    Empty,

    /// bincode failed to decode the payload for the given type.
    #[fail(display = "invalid value for type {}: {}", value_type, err)]
    DecodingError {
        /// The type whose payload failed to decode.
        value_type: Type,
        /// The underlying bincode error.
        err: Box<bincode::ErrorKind>,
    },

    /// bincode failed to encode a value.
    #[fail(display = "couldn't encode value: {}", _0)]
    EncodingError(Box<bincode::ErrorKind>),

    /// The byte slice was not a valid UUID.
    #[fail(display = "invalid uuid bytes")]
    InvalidUuid,
}
|
||||
|
||||
impl From<Box<bincode::ErrorKind>> for DataError {
|
||||
fn from(e: Box<bincode::ErrorKind>) -> DataError {
|
||||
DataError::EncodingError(e)
|
||||
}
|
||||
}
|
||||
|
||||
/// Errors raised by store operations, unified across backends.
#[derive(Debug, Fail)]
pub enum StoreError {
    /// A lock guarding the manager was poisoned by a panicking thread.
    #[fail(display = "manager poisoned")]
    ManagerPoisonError,

    /// The database contents are corrupted.
    #[fail(display = "database corrupted")]
    DatabaseCorrupted,

    /// No value exists for the requested key.
    #[fail(display = "key/value pair not found")]
    KeyValuePairNotFound,

    /// A key, database name, or value exceeded the backend's size limits.
    #[fail(display = "unsupported size of key/DB name/data")]
    KeyValuePairBadSize,

    /// The on-disk file is not a valid database.
    #[fail(display = "file is not a valid database")]
    FileInvalid,

    /// The environment's memory map is full; see `Rkv::set_map_size`.
    #[fail(display = "environment mapsize reached")]
    MapFull,

    /// The configured maximum number of databases has been reached.
    #[fail(display = "environment maxdbs reached")]
    DbsFull,

    /// The configured maximum number of readers has been reached.
    #[fail(display = "environment maxreaders reached")]
    ReadersFull,

    /// An underlying I/O operation failed.
    #[fail(display = "I/O error: {:?}", _0)]
    IoError(io::Error),

    /// The environment path is missing or of the wrong filesystem type.
    #[fail(display = "environment path does not exist or not the right type: {:?}", _0)]
    UnsuitableEnvironmentPath(PathBuf),

    /// A value failed to encode or decode.
    #[fail(display = "data error: {:?}", _0)]
    DataError(DataError),

    /// An error reported by the LMDB backend.
    #[fail(display = "lmdb backend error: {}", _0)]
    LmdbError(lmdb::Error),

    /// An error reported by the safe-mode backend.
    #[fail(display = "safe mode backend error: {}", _0)]
    SafeModeError(SafeModeError),

    /// A read transaction was already open on the given thread.
    #[fail(display = "read transaction already exists in thread {:?}", _0)]
    ReadTransactionAlreadyExists(ThreadId),

    /// A database open was attempted while a transaction was active on the
    /// given thread.
    #[fail(display = "attempted to open DB during transaction in thread {:?}", _0)]
    OpenAttemptedDuringTransaction(ThreadId),
}
|
||||
|
||||
impl StoreError {
|
||||
pub fn open_during_transaction() -> StoreError {
|
||||
StoreError::OpenAttemptedDuringTransaction(thread::current().id())
|
||||
}
|
||||
|
||||
pub fn read_transaction_already_exists() -> StoreError {
|
||||
StoreError::ReadTransactionAlreadyExists(thread::current().id())
|
||||
}
|
||||
}
|
||||
|
||||
impl From<DataError> for StoreError {
|
||||
fn from(e: DataError) -> StoreError {
|
||||
StoreError::DataError(e)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<io::Error> for StoreError {
|
||||
fn from(e: io::Error) -> StoreError {
|
||||
StoreError::IoError(e)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> From<sync::PoisonError<T>> for StoreError {
|
||||
fn from(_: sync::PoisonError<T>) -> StoreError {
|
||||
StoreError::ManagerPoisonError
|
||||
}
|
||||
}
|
||||
|
||||
/// Errors raised while migrating data between environments.
#[derive(Debug, Fail)]
pub enum MigrateError {
    /// A store operation failed during migration.
    #[fail(display = "store error: {}", _0)]
    StoreError(StoreError),

    /// A lock guarding the manager was poisoned by a panicking thread.
    #[fail(display = "manager poisoned")]
    ManagerPoisonError,

    /// The source environment contains no data to migrate.
    #[fail(display = "source is empty")]
    SourceEmpty,

    /// The destination environment already contains data.
    #[fail(display = "destination is not empty")]
    DestinationNotEmpty,
}
|
||||
|
||||
impl From<StoreError> for MigrateError {
|
||||
fn from(e: StoreError) -> MigrateError {
|
||||
MigrateError::StoreError(e)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> From<sync::PoisonError<T>> for MigrateError {
|
||||
fn from(_: sync::PoisonError<T>) -> MigrateError {
|
||||
MigrateError::ManagerPoisonError
|
||||
}
|
||||
}
|
|
@ -0,0 +1,47 @@
|
|||
// Copyright 2018-2019 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
use std::{
|
||||
io,
|
||||
path::{
|
||||
Path,
|
||||
PathBuf,
|
||||
},
|
||||
};
|
||||
|
||||
use url::Url;
|
||||
|
||||
use crate::{
|
||||
error::StoreError,
|
||||
value::Value,
|
||||
};
|
||||
|
||||
pub(crate) fn read_transform(value: Result<&[u8], StoreError>) -> Result<Value, StoreError> {
|
||||
match value {
|
||||
Ok(bytes) => Value::from_tagged_slice(bytes).map_err(StoreError::DataError),
|
||||
Err(e) => Err(e),
|
||||
}
|
||||
}
|
||||
|
||||
// Workaround the UNC path on Windows, see https://github.com/rust-lang/rust/issues/42869.
|
||||
// Otherwise, `Env::from_builder()` will panic with error_no(123).
|
||||
pub(crate) fn canonicalize_path<'p, P>(path: P) -> io::Result<PathBuf>
|
||||
where
|
||||
P: Into<&'p Path>,
|
||||
{
|
||||
let canonical = path.into().canonicalize()?;
|
||||
|
||||
Ok(if cfg!(target_os = "windows") {
|
||||
let map_err = |_| io::Error::new(io::ErrorKind::Other, "path canonicalization error");
|
||||
Url::from_file_path(&canonical).and_then(|url| url.to_file_path()).map_err(map_err)?
|
||||
} else {
|
||||
canonical
|
||||
})
|
||||
}
|
|
@ -0,0 +1,253 @@
|
|||
// Copyright 2018-2019 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
//! A simple, humane, typed key-value storage solution. It supports multiple backend
|
||||
//! engines with varying guarantees, such as [LMDB](http://www.lmdb.tech/doc/) for
|
||||
//! performance, or "SafeMode" for reliability.
|
||||
//!
|
||||
//! It aims to achieve the following:
|
||||
//!
|
||||
//! - Avoid sharp edges (e.g., obscure error codes for common situations).
|
||||
//! - Report errors via [failure](https://docs.rs/failure/).
|
||||
//! - Correctly restrict access to one handle per process via a
|
||||
//! [Manager](struct.Manager.html).
|
||||
//! - Use Rust's type system to make single-typed key stores safe and ergonomic.
|
||||
//! - Encode and decode values via [bincode](https://docs.rs/bincode/)/[serde](https://docs.rs/serde/)
|
||||
//! and type tags, achieving platform-independent storage and input/output flexibility.
|
||||
//!
|
||||
//! It exposes these primary abstractions:
|
||||
//!
|
||||
//! - [Manager](struct.Manager.html): a singleton that controls access to environments
|
||||
//! - [Rkv](struct.Rkv.html): an environment contains a set of key/value databases
|
||||
//! - [SingleStore](store/single/struct.SingleStore.html): a database contains a set of
|
||||
//! key/value pairs
|
||||
//!
|
||||
//! Keys can be anything that implements `AsRef<[u8]>` or integers
|
||||
//! (when accessing an [IntegerStore](store/integer/struct.IntegerStore.html)).
|
||||
//!
|
||||
//! Values can be any of the types defined by the [Value](value/enum.Value.html) enum,
|
||||
//! including:
|
||||
//!
|
||||
//! - booleans (`Value::Bool`)
|
||||
//! - integers (`Value::I64`, `Value::U64`)
|
||||
//! - floats (`Value::F64`)
|
||||
//! - strings (`Value::Str`)
|
||||
//! - blobs (`Value::Blob`)
|
||||
//!
|
||||
//! See [Value](value/enum.Value.html) for the complete list of supported types.
|
||||
//!
|
||||
//! ## Basic Usage
|
||||
//! ```
|
||||
//! use rkv::{Manager, Rkv, SingleStore, Value, StoreOptions};
|
||||
//! use rkv::backend::{Lmdb, LmdbEnvironment};
|
||||
//! use std::fs;
|
||||
//! use tempfile::Builder;
|
||||
//!
|
||||
//! // First determine the path to the environment, which is represented on disk as a
|
||||
//! // directory containing two files:
|
||||
//! //
|
||||
//! // * a data file containing the key/value stores
|
||||
//! // * a lock file containing metadata about current transactions
|
||||
//! //
|
||||
//! // In this example, we use the `tempfile` crate to create the directory.
|
||||
//! //
|
||||
//! let root = Builder::new().prefix("simple-db").tempdir().unwrap();
|
||||
//! fs::create_dir_all(root.path()).unwrap();
|
||||
//! let path = root.path();
|
||||
//!
|
||||
//! // The `Manager` enforces that each process opens the same environment at most once by
|
||||
//! // caching a handle to each environment that it opens. Use it to retrieve the handle
|
||||
//! // to an opened environment—or create one if it hasn't already been opened:
|
||||
//! let mut manager = Manager::<LmdbEnvironment>::singleton().write().unwrap();
|
||||
//! let created_arc = manager.get_or_create(path, Rkv::new::<Lmdb>).unwrap();
|
||||
//! let env = created_arc.read().unwrap();
|
||||
//!
|
||||
//! // Then you can use the environment handle to get a handle to a datastore:
|
||||
//! let store = env.open_single("mydb", StoreOptions::create()).unwrap();
|
||||
//!
|
||||
//! {
|
||||
//! // Use a write transaction to mutate the store via a `Writer`. There can be only
|
||||
//! // one writer for a given environment, so opening a second one will block until
|
||||
//! // the first completes.
|
||||
//! let mut writer = env.write().unwrap();
|
||||
//!
|
||||
//! // Keys are `AsRef<[u8]>`, while values are `Value` enum instances. Use the `Blob`
|
||||
//! // variant to store arbitrary collections of bytes. Putting data returns a
|
||||
//! // `Result<(), StoreError>`, where StoreError is an enum identifying the reason
|
||||
//! // for a failure.
|
||||
//! store.put(&mut writer, "int", &Value::I64(1234)).unwrap();
|
||||
//! store.put(&mut writer, "uint", &Value::U64(1234_u64)).unwrap();
|
||||
//! store.put(&mut writer, "float", &Value::F64(1234.0.into())).unwrap();
|
||||
//! store.put(&mut writer, "instant", &Value::Instant(1528318073700)).unwrap();
|
||||
//! store.put(&mut writer, "boolean", &Value::Bool(true)).unwrap();
|
||||
//! store.put(&mut writer, "string", &Value::Str("Héllo, wörld!")).unwrap();
|
||||
//! store.put(&mut writer, "json", &Value::Json(r#"{"foo":"bar", "number": 1}"#)).unwrap();
|
||||
//! store.put(&mut writer, "blob", &Value::Blob(b"blob")).unwrap();
|
||||
//!
|
||||
//! // You must commit a write transaction before the writer goes out of scope, or the
|
||||
//! // transaction will abort and the data won't persist.
|
||||
//! writer.commit().unwrap();
|
||||
//! }
|
||||
//!
|
||||
//! {
|
||||
//! // Use a read transaction to query the store via a `Reader`. There can be multiple
|
||||
//! // concurrent readers for a store, and readers never block on a writer nor other
|
||||
//! // readers.
|
||||
//! let reader = env.read().expect("reader");
|
||||
//!
|
||||
//! // Keys are `AsRef<[u8]>`, and the return value is `Result<Option<Value>, StoreError>`.
|
||||
//! println!("Get int {:?}", store.get(&reader, "int").unwrap());
|
||||
//! println!("Get uint {:?}", store.get(&reader, "uint").unwrap());
|
||||
//! println!("Get float {:?}", store.get(&reader, "float").unwrap());
|
||||
//! println!("Get instant {:?}", store.get(&reader, "instant").unwrap());
|
||||
//! println!("Get boolean {:?}", store.get(&reader, "boolean").unwrap());
|
||||
//! println!("Get string {:?}", store.get(&reader, "string").unwrap());
|
||||
//! println!("Get json {:?}", store.get(&reader, "json").unwrap());
|
||||
//! println!("Get blob {:?}", store.get(&reader, "blob").unwrap());
|
||||
//!
|
||||
//! // Retrieving a non-existent value returns `Ok(None)`.
|
||||
//! println!("Get non-existent value {:?}", store.get(&reader, "non-existent").unwrap());
|
||||
//!
|
||||
//! // A read transaction will automatically close once the reader goes out of scope,
|
||||
//! // so it isn't necessary to close it explicitly, although you can do so by calling
|
||||
//! // `Reader.abort()`.
|
||||
//! }
|
||||
//!
|
||||
//! {
|
||||
//! // Aborting a write transaction rolls back the change(s).
|
||||
//! let mut writer = env.write().unwrap();
|
||||
//! store.put(&mut writer, "foo", &Value::Str("bar")).unwrap();
|
||||
//! writer.abort();
|
||||
//! let reader = env.read().expect("reader");
|
||||
//! println!("It should be None! ({:?})", store.get(&reader, "foo").unwrap());
|
||||
//! }
|
||||
//!
|
||||
//! {
|
||||
//! // Explicitly aborting a transaction is not required unless an early abort is
|
||||
//! // desired, since both read and write transactions will implicitly be aborted once
|
||||
//! // they go out of scope.
|
||||
//! {
|
||||
//! let mut writer = env.write().unwrap();
|
||||
//! store.put(&mut writer, "foo", &Value::Str("bar")).unwrap();
|
||||
//! }
|
||||
//! let reader = env.read().expect("reader");
|
||||
//! println!("It should be None! ({:?})", store.get(&reader, "foo").unwrap());
|
||||
//! }
|
||||
//!
|
||||
//! {
|
||||
//! // Deleting a key/value pair also requires a write transaction.
|
||||
//! let mut writer = env.write().unwrap();
|
||||
//! store.put(&mut writer, "foo", &Value::Str("bar")).unwrap();
|
||||
//! store.put(&mut writer, "bar", &Value::Str("baz")).unwrap();
|
||||
//! store.delete(&mut writer, "foo").unwrap();
|
||||
//!
|
||||
//! // A write transaction also supports reading, and the version of the store that it
|
||||
//! // reads includes the changes it has made regardless of the commit state of that
|
||||
//! // transaction.
|
||||
|
||||
//! // In the code above, "foo" and "bar" were put into the store, then "foo" was
|
||||
//! // deleted so only "bar" will return a result when the database is queried via the
|
||||
//! // writer.
|
||||
//! println!("It should be None! ({:?})", store.get(&writer, "foo").unwrap());
|
||||
//! println!("Get bar ({:?})", store.get(&writer, "bar").unwrap());
|
||||
//!
|
||||
//! // But a reader won't see that change until the write transaction is committed.
|
||||
//! {
|
||||
//! let reader = env.read().expect("reader");
|
||||
//! println!("Get foo {:?}", store.get(&reader, "foo").unwrap());
|
||||
//! println!("Get bar {:?}", store.get(&reader, "bar").unwrap());
|
||||
//! }
|
||||
//! writer.commit().unwrap();
|
||||
//! {
|
||||
//! let reader = env.read().expect("reader");
|
||||
//! println!("It should be None! ({:?})", store.get(&reader, "foo").unwrap());
|
||||
//! println!("Get bar {:?}", store.get(&reader, "bar").unwrap());
|
||||
//! }
|
||||
//!
|
||||
//! // Committing a transaction consumes the writer, preventing you from reusing it by
|
||||
//! // failing at compile time with an error. This line would report "error[E0382]:
|
||||
//! // borrow of moved value: `writer`".
|
||||
//! // store.put(&mut writer, "baz", &Value::Str("buz")).unwrap();
|
||||
//! }
|
||||
//!
|
||||
//! {
|
||||
//! // Clearing all the entries in the store with a write transaction.
|
||||
//! {
|
||||
//! let mut writer = env.write().unwrap();
|
||||
//! store.put(&mut writer, "foo", &Value::Str("bar")).unwrap();
|
||||
//! store.put(&mut writer, "bar", &Value::Str("baz")).unwrap();
|
||||
//! writer.commit().unwrap();
|
||||
//! }
|
||||
//!
|
||||
//! {
|
||||
//! let mut writer = env.write().unwrap();
|
||||
//! store.clear(&mut writer).unwrap();
|
||||
//! writer.commit().unwrap();
|
||||
//! }
|
||||
//!
|
||||
//! {
|
||||
//! let reader = env.read().expect("reader");
|
||||
//! println!("It should be None! ({:?})", store.get(&reader, "foo").unwrap());
|
||||
//! println!("It should be None! ({:?})", store.get(&reader, "bar").unwrap());
|
||||
//! }
|
||||
//!
|
||||
//! }
|
||||
//!
|
||||
//! ```
|
||||
|
||||
mod env;
|
||||
mod error;
|
||||
mod helpers;
|
||||
mod manager;
|
||||
mod readwrite;
|
||||
|
||||
pub mod backend;
|
||||
pub mod migrator;
|
||||
pub mod store;
|
||||
pub mod value;
|
||||
|
||||
pub use backend::{
|
||||
DatabaseFlags,
|
||||
EnvironmentFlags,
|
||||
WriteFlags,
|
||||
};
|
||||
pub use env::Rkv;
|
||||
pub use error::{
|
||||
DataError,
|
||||
MigrateError,
|
||||
StoreError,
|
||||
};
|
||||
pub use manager::Manager;
|
||||
pub use migrator::Migrator;
|
||||
pub use readwrite::{
|
||||
Readable,
|
||||
Reader,
|
||||
Writer,
|
||||
};
|
||||
pub use store::{
|
||||
keys::EncodableKey,
|
||||
single::SingleStore,
|
||||
Options as StoreOptions,
|
||||
};
|
||||
pub use value::{
|
||||
OwnedValue,
|
||||
Value,
|
||||
};
|
||||
|
||||
#[cfg(feature = "db-dup-sort")]
|
||||
pub use store::multi::MultiStore;
|
||||
|
||||
#[cfg(feature = "db-int-key")]
|
||||
pub use store::integer::IntegerStore;
|
||||
#[cfg(feature = "db-int-key")]
|
||||
pub use store::keys::PrimitiveInt;
|
||||
|
||||
#[cfg(all(feature = "db-dup-sort", feature = "db-int-key"))]
|
||||
pub use store::integermulti::MultiIntegerStore;
|
|
@ -0,0 +1,229 @@
|
|||
// Copyright 2018-2019 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
use std::{
|
||||
collections::{
|
||||
btree_map::Entry,
|
||||
BTreeMap,
|
||||
},
|
||||
os::raw::c_uint,
|
||||
path::{
|
||||
Path,
|
||||
PathBuf,
|
||||
},
|
||||
result,
|
||||
sync::{
|
||||
Arc,
|
||||
RwLock,
|
||||
},
|
||||
};
|
||||
|
||||
use lazy_static::lazy_static;
|
||||
|
||||
use crate::{
|
||||
backend::{
|
||||
BackendEnvironment,
|
||||
BackendEnvironmentBuilder,
|
||||
LmdbEnvironment,
|
||||
SafeModeEnvironment,
|
||||
},
|
||||
error::StoreError,
|
||||
helpers::canonicalize_path,
|
||||
Rkv,
|
||||
};
|
||||
|
||||
// All fallible manager operations report failures as `StoreError`.
type Result<T> = result::Result<T, StoreError>;
// Environments are handed out as shared, lock-protected references.
type SharedRkv<E> = Arc<RwLock<Rkv<E>>>;

lazy_static! {
    // One process-wide manager per backend, so that each process opens a given
    // environment at most once per backend.
    static ref MANAGER_LMDB: RwLock<Manager<LmdbEnvironment>> = RwLock::new(Manager::new());
    static ref MANAGER_SAFE_MODE: RwLock<Manager<SafeModeEnvironment>> = RwLock::new(Manager::new());
}
|
||||
|
||||
/// A process is only permitted to have one open handle to each Rkv environment. This
/// manager exists to enforce that constraint: don't open environments directly.
///
/// By default, path canonicalization is enabled when identifying RKV instances,
/// because it helps enforce the constraints guaranteed by this manager. However,
/// path canonicalization might crash in some fringe circumstances, so the
/// `no-canonicalize-path` feature offers the possibility of disabling it.
/// See: https://bugzilla.mozilla.org/show_bug.cgi?id=1531887
///
/// When path canonicalization is disabled, you *must* ensure an RKV environment is
/// always created or retrieved with the same path.
pub struct Manager<E> {
    // Maps the (possibly canonicalized) environment path to its shared handle.
    environments: BTreeMap<PathBuf, SharedRkv<E>>,
}
|
||||
|
||||
impl<'e, E> Manager<E>
|
||||
where
|
||||
E: BackendEnvironment<'e>,
|
||||
{
|
||||
fn new() -> Manager<E> {
|
||||
Manager {
|
||||
environments: Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Return the open env at `path`, returning `None` if it has not already been opened.
|
||||
pub fn get<'p, P>(&self, path: P) -> Result<Option<SharedRkv<E>>>
|
||||
where
|
||||
P: Into<&'p Path>,
|
||||
{
|
||||
let canonical = if cfg!(feature = "no-canonicalize-path") {
|
||||
path.into().to_path_buf()
|
||||
} else {
|
||||
canonicalize_path(path)?
|
||||
};
|
||||
Ok(self.environments.get(&canonical).cloned())
|
||||
}
|
||||
|
||||
/// Return the open env at `path`, or create it by calling `f`.
|
||||
pub fn get_or_create<'p, F, P>(&mut self, path: P, f: F) -> Result<SharedRkv<E>>
|
||||
where
|
||||
F: FnOnce(&Path) -> Result<Rkv<E>>,
|
||||
P: Into<&'p Path>,
|
||||
{
|
||||
let canonical = if cfg!(feature = "no-canonicalize-path") {
|
||||
path.into().to_path_buf()
|
||||
} else {
|
||||
canonicalize_path(path)?
|
||||
};
|
||||
Ok(match self.environments.entry(canonical) {
|
||||
Entry::Occupied(e) => e.get().clone(),
|
||||
Entry::Vacant(e) => {
|
||||
let k = Arc::new(RwLock::new(f(e.key().as_path())?));
|
||||
e.insert(k).clone()
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
/// Return the open env at `path` with `capacity`, or create it by calling `f`.
|
||||
pub fn get_or_create_with_capacity<'p, F, P>(&mut self, path: P, capacity: c_uint, f: F) -> Result<SharedRkv<E>>
|
||||
where
|
||||
F: FnOnce(&Path, c_uint) -> Result<Rkv<E>>,
|
||||
P: Into<&'p Path>,
|
||||
{
|
||||
let canonical = if cfg!(feature = "no-canonicalize-path") {
|
||||
path.into().to_path_buf()
|
||||
} else {
|
||||
canonicalize_path(path)?
|
||||
};
|
||||
Ok(match self.environments.entry(canonical) {
|
||||
Entry::Occupied(e) => e.get().clone(),
|
||||
Entry::Vacant(e) => {
|
||||
let k = Arc::new(RwLock::new(f(e.key().as_path(), capacity)?));
|
||||
e.insert(k).clone()
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
/// Return a new Rkv environment from the builder, or create it by calling `f`.
|
||||
pub fn get_or_create_from_builder<'p, F, P, B>(&mut self, path: P, builder: B, f: F) -> Result<SharedRkv<E>>
|
||||
where
|
||||
F: FnOnce(&Path, B) -> Result<Rkv<E>>,
|
||||
P: Into<&'p Path>,
|
||||
B: BackendEnvironmentBuilder<'e, Environment = E>,
|
||||
{
|
||||
let canonical = if cfg!(feature = "no-canonicalize-path") {
|
||||
path.into().to_path_buf()
|
||||
} else {
|
||||
canonicalize_path(path)?
|
||||
};
|
||||
Ok(match self.environments.entry(canonical) {
|
||||
Entry::Occupied(e) => e.get().clone(),
|
||||
Entry::Vacant(e) => {
|
||||
let k = Arc::new(RwLock::new(f(e.key().as_path(), builder)?));
|
||||
e.insert(k).clone()
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
/// Tries to close the specified environment and delete all its files from disk.
|
||||
/// Doesn't delete the folder used when opening the environment.
|
||||
/// This will only work if there's no other users of this environment.
|
||||
pub fn try_close_and_delete<'p, P>(&mut self, path: P) -> Result<()>
|
||||
where
|
||||
P: Into<&'p Path>,
|
||||
{
|
||||
let canonical = if cfg!(feature = "no-canonicalize-path") {
|
||||
path.into().to_path_buf()
|
||||
} else {
|
||||
canonicalize_path(path)?
|
||||
};
|
||||
match self.environments.entry(canonical) {
|
||||
Entry::Vacant(_) => {}, // noop
|
||||
Entry::Occupied(e) => {
|
||||
if Arc::strong_count(e.get()) == 1 {
|
||||
if let Ok(env) = Arc::try_unwrap(e.remove()) {
|
||||
env.into_inner()?.close_and_delete()?;
|
||||
}
|
||||
}
|
||||
},
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Manager<LmdbEnvironment> {
    /// Return the process-wide singleton manager for LMDB-backed environments.
    pub fn singleton() -> &'static RwLock<Manager<LmdbEnvironment>> {
        &*MANAGER_LMDB
    }
}
|
||||
|
||||
impl Manager<SafeModeEnvironment> {
    /// Return the process-wide singleton manager for safe-mode-backed environments.
    pub fn singleton() -> &'static RwLock<Manager<SafeModeEnvironment>> {
        &*MANAGER_SAFE_MODE
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use crate::*;

    use std::fs;

    use tempfile::Builder;

    use backend::Lmdb;

    /// Test that one can mutate managed Rkv instances in surprising ways.
    /// The manager deliberately doesn't guard against this: it only maps paths to
    /// handles, and the handle's interior mutability is out of its control.
    #[test]
    fn test_mutate_managed_rkv() {
        let mut manager = Manager::<LmdbEnvironment>::new();

        let root1 = Builder::new().prefix("test_mutate_managed_rkv_1").tempdir().expect("tempdir");
        fs::create_dir_all(root1.path()).expect("dir created");
        let path1 = root1.path();
        let arc = manager.get_or_create(path1, Rkv::new::<Lmdb>).expect("created");

        // Arc<RwLock<>> has interior mutability, so we can replace arc's Rkv instance with a new
        // instance that has a different path.
        let root2 = Builder::new().prefix("test_mutate_managed_rkv_2").tempdir().expect("tempdir");
        fs::create_dir_all(root2.path()).expect("dir created");
        let path2 = root2.path();
        {
            let mut rkv = arc.write().expect("guard");
            let rkv2 = Rkv::new::<Lmdb>(path2).expect("Rkv");
            *rkv = rkv2;
        }

        // Arc now has a different internal Rkv with path2, but it's still mapped to path1 in
        // manager, so its pointer is equal to a new Arc for path1.
        let path1_arc = manager.get(path1).expect("success").expect("existed");
        assert!(Arc::ptr_eq(&path1_arc, &arc));

        // Meanwhile, a new Arc for path2 has a different pointer, even though its Rkv's path is
        // the same as arc's current path.
        let path2_arc = manager.get_or_create(path2, Rkv::new::<Lmdb>).expect("success");
        assert!(!Arc::ptr_eq(&path2_arc, &arc));
    }
}
|
|
@ -0,0 +1,168 @@
|
|||
// Copyright 2018-2019 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
//! A simple utility for migrating data from one RKV environment to another. Notably, this
//! tool can migrate data from an environment created with a different backend than the
//! current RKV consumer (e.g. from Lmdb to SafeMode).
|
||||
//!
|
||||
//! The utility doesn't support migrating between 32-bit and 64-bit LMDB environments yet,
|
||||
//! see `arch_migrator` if this is needed. However, this utility is ultimately intended to
|
||||
//! handle all possible migrations.
|
||||
//!
|
||||
//! The destination environment should be empty of data, otherwise an error is returned.
|
||||
//!
|
||||
//! There are 3 versions of the migration methods:
|
||||
//! * `migrate_<src>_to_<dst>`, where `<src>` and `<dst>` are the source and destination
|
||||
//! environment types. You're responsible for opening both these environments, handling
|
||||
//! all errors, and performing any cleanup if necessary.
|
||||
//! * `open_and_migrate_<src>_to_<dst>`, which is similar to the above, but automatically
|
||||
//! attempts to open the source environment and delete all of its supporting files if
|
||||
//! there's no other environment open at that path. You're still responsible for
|
||||
//! handling all errors.
|
||||
//! * `easy_migrate_<src>_to_<dst>` which is similar to the above, but ignores the
|
||||
//! migration and doesn't delete any files if the source environment is invalid
|
||||
//! (corrupted), unavailable (path not found or incompatible with configuration), or
|
||||
//! empty (database has no records).
|
||||
//!
|
||||
//! The tool currently has these limitations:
|
||||
//!
|
||||
//! 1. It doesn't support migration from environments created with
|
||||
//! `EnvironmentFlags::NO_SUB_DIR`. To migrate such an environment, create a temporary
|
||||
//! directory, copy the environment's data files in the temporary directory, then
|
||||
//! migrate the temporary directory as the source environment.
|
||||
//! 2. It doesn't support migration from databases created with `DatabaseFlags::DUP_SORT`
|
||||
//! (with or without `DatabaseFlags::DUP_FIXED`) nor with `DatabaseFlags::INTEGER_KEY`.
|
||||
//! This effectively means that migration is limited to `SingleStore`s.
|
||||
//! 3. It doesn't allow for existing data in the destination environment, which means that
|
||||
//! it cannot overwrite nor append data.
|
||||
|
||||
use crate::{
|
||||
backend::{
|
||||
LmdbEnvironment,
|
||||
SafeModeEnvironment,
|
||||
},
|
||||
error::MigrateError,
|
||||
Rkv,
|
||||
StoreOptions,
|
||||
};
|
||||
|
||||
pub use crate::backend::{
|
||||
LmdbArchMigrateError,
|
||||
LmdbArchMigrateResult,
|
||||
LmdbArchMigrator,
|
||||
};
|
||||
|
||||
// FIXME: should parametrize this instead.
|
||||
|
||||
macro_rules! fn_migrator {
    ($name:tt, $src_env:ty, $dst_env:ty) => {
        /// Migrate all data in all of databases from the source environment to the destination
        /// environment. This includes all key/value pairs in the main database that aren't
        /// metadata about subdatabases and all key/value pairs in all subdatabases.
        ///
        /// Other backend-specific metadata such as map size or maximum databases left intact on
        /// the given environments.
        ///
        /// The destination environment should be empty of data, otherwise an error is returned.
        pub fn $name<S, D>(src_env: S, dst_env: D) -> Result<(), MigrateError>
        where
            S: std::ops::Deref<Target = Rkv<$src_env>>,
            D: std::ops::Deref<Target = Rkv<$dst_env>>,
        {
            // Propagate database-enumeration failures to the caller instead of
            // panicking; `?` converts the `StoreError` into a `MigrateError`,
            // just as it does for the store operations below.
            let src_dbs = src_env.get_dbs()?;
            if src_dbs.is_empty() {
                return Err(MigrateError::SourceEmpty);
            }
            let dst_dbs = dst_env.get_dbs()?;
            if !dst_dbs.is_empty() {
                return Err(MigrateError::DestinationNotEmpty);
            }
            // Copy every key/value pair of every source database, using one
            // write transaction (and one commit) per database.
            for name in src_dbs {
                let src_store = src_env.open_single(name.as_deref(), StoreOptions::default())?;
                let dst_store = dst_env.open_single(name.as_deref(), StoreOptions::create())?;
                let reader = src_env.read()?;
                let mut writer = dst_env.write()?;
                let mut iter = src_store.iter_start(&reader)?;
                while let Some(Ok((key, value))) = iter.next() {
                    // Propagate write failures rather than panicking mid-migration.
                    dst_store.put(&mut writer, key, &value)?;
                }
                writer.commit()?;
            }
            Ok(())
        }
    };

    (open $migrate:tt, $name:tt, $builder:tt, $src_env:ty, $dst_env:ty) => {
        /// Same as the non `open_*` migration method, but automatically attempts to open the
        /// source environment. Finally, deletes all of its supporting files if there's no other
        /// environment open at that path.
        pub fn $name<F, D>(path: &std::path::Path, build: F, dst_env: D) -> Result<(), MigrateError>
        where
            F: FnOnce(crate::backend::$builder) -> crate::backend::$builder,
            D: std::ops::Deref<Target = Rkv<$dst_env>>,
        {
            use crate::backend::*;

            // Open (or retrieve) the source environment through the manager so
            // the one-handle-per-environment invariant is respected.
            let mut manager = crate::Manager::<$src_env>::singleton().write()?;
            let mut builder = Rkv::<$src_env>::environment_builder::<$builder>();
            builder.set_max_dbs(crate::env::DEFAULT_MAX_DBS);
            builder = build(builder);

            let src_env = manager.get_or_create_from_builder(path, builder, Rkv::from_builder::<$builder>)?;
            Migrator::$migrate(src_env.read()?, dst_env)?;

            // Drop our handle first so the manager can tell whether anyone else
            // still has the environment open before deleting its files.
            drop(src_env);
            manager.try_close_and_delete(path)?;

            Ok(())
        }
    };

    (easy $migrate:tt, $name:tt, $src_env:ty, $dst_env:ty) => {
        /// Same as the `open_*` migration method, but ignores the migration and doesn't delete
        /// any files if the source environment is invalid (corrupted), unavailable, or empty.
        pub fn $name<D>(path: &std::path::Path, dst_env: D) -> Result<(), MigrateError>
        where
            D: std::ops::Deref<Target = Rkv<$dst_env>>,
        {
            match Migrator::$migrate(path, |builder| builder, dst_env) {
                // Treat "nothing to migrate" conditions as success.
                Err(crate::MigrateError::StoreError(crate::StoreError::FileInvalid)) => Ok(()),
                Err(crate::MigrateError::StoreError(crate::StoreError::IoError(_))) => Ok(()),
                Err(crate::MigrateError::StoreError(crate::StoreError::UnsuitableEnvironmentPath(_))) => Ok(()),
                Err(crate::MigrateError::SourceEmpty) => Ok(()),
                result => result,
            }?;

            Ok(())
        }
    };
}
|
||||
|
||||
// Generates the full family of migration functions for a pair of backends:
// `migrate_<src>_to_<dst>` and `migrate_<dst>_to_<src>`, each with `open_and_*`
// and `easy_*` variants, by expanding the corresponding `fn_migrator!` arms.
macro_rules! fns_migrator {
    // Entry point: expand both directions for the backend pair.
    ($src:tt, $dst:tt) => {
        paste::item! {
            fns_migrator!([<migrate_ $src _to_ $dst>], $src, $dst);
            fns_migrator!([<migrate_ $dst _to_ $src>], $dst, $src);
        }
    };
    // One direction: generate the plain, `open_and_*`, and `easy_*` functions.
    ($name:tt, $src:tt, $dst:tt) => {
        paste::item! {
            fn_migrator!($name, [<$src:camel Environment>], [<$dst:camel Environment>]);
            fn_migrator!(open $name, [<open_and_ $name>], [<$src:camel>], [<$src:camel Environment>], [<$dst:camel Environment>]);
            fn_migrator!(easy [<open_and_ $name>], [<easy_ $name>], [<$src:camel Environment>], [<$dst:camel Environment>]);
        }
    };
}
|
||||
|
||||
/// Namespace for the generated migration functions, e.g.
/// `Migrator::migrate_lmdb_to_safe_mode` and `Migrator::easy_migrate_lmdb_to_safe_mode`.
pub struct Migrator;

impl Migrator {
    // Generates migration functions in both directions between the LMDB and
    // safe-mode backends.
    fns_migrator!(lmdb, safe_mode);
}
|
|
@ -0,0 +1,145 @@
|
|||
// Copyright 2018-2019 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
use crate::{
|
||||
backend::{
|
||||
BackendDatabase,
|
||||
BackendRoCursor,
|
||||
BackendRoCursorTransaction,
|
||||
BackendRoTransaction,
|
||||
BackendRwCursorTransaction,
|
||||
BackendRwTransaction,
|
||||
},
|
||||
error::StoreError,
|
||||
helpers::read_transform,
|
||||
value::Value,
|
||||
};
|
||||
|
||||
/// Wrapper around a backend read-only transaction.
pub struct Reader<T>(T);
/// Wrapper around a backend read-write transaction.
pub struct Writer<T>(T);
|
||||
|
||||
/// Operations available on any transaction that can be read from — implemented
/// for both `Reader` and `Writer`.
pub trait Readable<'r> {
    type Database: BackendDatabase;
    type RoCursor: BackendRoCursor<'r>;

    /// Look up the value stored under key `k` in database `db`, returning
    /// `Ok(None)` when the key is absent.
    fn get<K>(&'r self, db: &Self::Database, k: &K) -> Result<Option<Value<'r>>, StoreError>
    where
        K: AsRef<[u8]>;

    /// Open a read-only cursor over database `db`.
    fn open_ro_cursor(&'r self, db: &Self::Database) -> Result<Self::RoCursor, StoreError>;
}
|
||||
|
||||
impl<'r, T> Readable<'r> for Reader<T>
|
||||
where
|
||||
T: BackendRoCursorTransaction<'r>,
|
||||
{
|
||||
type Database = T::Database;
|
||||
type RoCursor = T::RoCursor;
|
||||
|
||||
fn get<K>(&'r self, db: &T::Database, k: &K) -> Result<Option<Value<'r>>, StoreError>
|
||||
where
|
||||
K: AsRef<[u8]>,
|
||||
{
|
||||
let bytes = self.0.get(db, k.as_ref()).map_err(|e| e.into());
|
||||
match read_transform(bytes).map(Some) {
|
||||
Err(StoreError::KeyValuePairNotFound) => Ok(None),
|
||||
result => result,
|
||||
}
|
||||
}
|
||||
|
||||
fn open_ro_cursor(&'r self, db: &T::Database) -> Result<T::RoCursor, StoreError> {
|
||||
self.0.open_ro_cursor(db).map_err(|e| e.into())
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Reader<T> {
    /// Wrap the given backend read transaction in a `Reader`.
    pub(crate) fn new(txn: T) -> Reader<T> {
        Reader(txn)
    }
}
|
||||
|
||||
impl<T> Reader<T>
where
    T: BackendRoTransaction,
{
    /// Explicitly end the read transaction, consuming the reader. Optional:
    /// letting the reader go out of scope closes the transaction as well.
    pub fn abort(self) {
        self.0.abort();
    }
}
|
||||
|
||||
impl<'r, T> Readable<'r> for Writer<T>
|
||||
where
|
||||
T: BackendRwCursorTransaction<'r>,
|
||||
{
|
||||
type Database = T::Database;
|
||||
type RoCursor = T::RoCursor;
|
||||
|
||||
fn get<K>(&'r self, db: &T::Database, k: &K) -> Result<Option<Value<'r>>, StoreError>
|
||||
where
|
||||
K: AsRef<[u8]>,
|
||||
{
|
||||
let bytes = self.0.get(db, k.as_ref()).map_err(|e| e.into());
|
||||
match read_transform(bytes).map(Some) {
|
||||
Err(StoreError::KeyValuePairNotFound) => Ok(None),
|
||||
result => result,
|
||||
}
|
||||
}
|
||||
|
||||
fn open_ro_cursor(&'r self, db: &T::Database) -> Result<T::RoCursor, StoreError> {
|
||||
self.0.open_ro_cursor(db).map_err(|e| e.into())
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Writer<T> {
    /// Wrap the given backend write transaction in a `Writer`.
    pub(crate) fn new(txn: T) -> Writer<T> {
        Writer(txn)
    }
}
|
||||
|
||||
impl<T> Writer<T>
where
    T: BackendRwTransaction,
{
    /// Commit the write transaction, consuming the writer. Changes are not
    /// persisted unless this is called before the writer is dropped.
    pub fn commit(self) -> Result<(), StoreError> {
        self.0.commit().map_err(|e| e.into())
    }

    /// Abort the write transaction, rolling back any changes made through it.
    pub fn abort(self) {
        self.0.abort();
    }

    /// Serialize `v` and store it under key `k` in database `db`.
    pub(crate) fn put<K>(&mut self, db: &T::Database, k: &K, v: &Value, flags: T::Flags) -> Result<(), StoreError>
    where
        K: AsRef<[u8]>,
    {
        // TODO: don't allocate twice.
        self.0.put(db, k.as_ref(), &v.to_bytes()?, flags).map_err(|e| e.into())
    }

    /// Delete the pair stored under key `k` from database `db`.
    #[cfg(not(feature = "db-dup-sort"))]
    pub(crate) fn delete<K>(&mut self, db: &T::Database, k: &K) -> Result<(), StoreError>
    where
        K: AsRef<[u8]>,
    {
        self.0.del(db, k.as_ref()).map_err(|e| e.into())
    }

    /// Delete from database `db` under key `k`; `v` is forwarded to the backend's
    /// `del` (presumably selecting a single duplicate value when `Some` — TODO
    /// confirm against the backend implementations).
    #[cfg(feature = "db-dup-sort")]
    pub(crate) fn delete<K>(&mut self, db: &T::Database, k: &K, v: Option<&[u8]>) -> Result<(), StoreError>
    where
        K: AsRef<[u8]>,
    {
        self.0.del(db, k.as_ref(), v).map_err(|e| e.into())
    }

    /// Remove all entries from database `db`.
    pub(crate) fn clear(&mut self, db: &T::Database) -> Result<(), StoreError> {
        self.0.clear_db(db).map_err(|e| e.into())
    }
}
|
|
@ -0,0 +1,41 @@
|
|||
// Copyright 2018-2019 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
pub mod keys;
|
||||
pub mod single;
|
||||
|
||||
#[cfg(feature = "db-dup-sort")]
|
||||
pub mod multi;
|
||||
|
||||
#[cfg(feature = "db-int-key")]
|
||||
pub mod integer;
|
||||
|
||||
#[cfg(all(feature = "db-dup-sort", feature = "db-int-key"))]
|
||||
pub mod integermulti;
|
||||
|
||||
use crate::backend::BackendDatabaseFlags;
|
||||
|
||||
/// Options for opening a store: whether to create it if missing, plus
/// backend-specific database flags.
#[derive(Default, Debug, Copy, Clone)]
pub struct Options<F> {
    // Create the database if it doesn't already exist.
    pub create: bool,
    // Backend-specific database flags.
    pub flags: F,
}
|
||||
|
||||
impl<F> Options<F>
|
||||
where
|
||||
F: BackendDatabaseFlags,
|
||||
{
|
||||
pub fn create() -> Options<F> {
|
||||
Options {
|
||||
create: true,
|
||||
flags: F::empty(),
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,548 @@
|
|||
// Copyright 2018-2019 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
use std::marker::PhantomData;
|
||||
|
||||
use crate::{
|
||||
backend::{
|
||||
BackendDatabase,
|
||||
BackendRwTransaction,
|
||||
},
|
||||
error::StoreError,
|
||||
readwrite::{
|
||||
Readable,
|
||||
Writer,
|
||||
},
|
||||
store::{
|
||||
keys::{
|
||||
Key,
|
||||
PrimitiveInt,
|
||||
},
|
||||
single::SingleStore,
|
||||
},
|
||||
value::Value,
|
||||
};
|
||||
|
||||
// Shorthand for store operations that succeed with no payload.
type EmptyResult = Result<(), StoreError>;

/// A `SingleStore` whose keys are primitive integers rather than arbitrary
/// byte strings.
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
pub struct IntegerStore<D, K> {
    // The underlying byte-keyed store; integer keys are encoded via `Key::new`
    // before delegation.
    inner: SingleStore<D>,
    // Records the key type parameter without storing a value of it.
    phantom: PhantomData<K>,
}
|
||||
|
||||
impl<D, K> IntegerStore<D, K>
where
    D: BackendDatabase,
    K: PrimitiveInt,
{
    /// Wrap the given database in an integer-keyed store.
    pub(crate) fn new(db: D) -> IntegerStore<D, K> {
        IntegerStore {
            inner: SingleStore::new(db),
            phantom: PhantomData,
        }
    }

    /// Look up the value stored under integer key `k`.
    pub fn get<'r, R>(&self, reader: &'r R, k: K) -> Result<Option<Value<'r>>, StoreError>
    where
        R: Readable<'r, Database = D>,
    {
        self.inner.get(reader, Key::new(&k)?)
    }

    /// Store `v` under integer key `k`.
    pub fn put<T>(&self, writer: &mut Writer<T>, k: K, v: &Value) -> EmptyResult
    where
        T: BackendRwTransaction<Database = D>,
    {
        self.inner.put(writer, Key::new(&k)?, v)
    }

    /// Delete the value stored under integer key `k`.
    pub fn delete<T>(&self, writer: &mut Writer<T>, k: K) -> EmptyResult
    where
        T: BackendRwTransaction<Database = D>,
    {
        self.inner.delete(writer, Key::new(&k)?)
    }

    /// Remove all entries from the store.
    pub fn clear<T>(&self, writer: &mut Writer<T>) -> EmptyResult
    where
        T: BackendRwTransaction<Database = D>,
    {
        self.inner.clear(writer)
    }
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::*;
|
||||
|
||||
use std::fs;
|
||||
|
||||
use tempfile::Builder;
|
||||
|
||||
    /// Values stored under integer keys — including both extremes of the u32
    /// range — should round-trip through the same writer and a later reader.
    #[test]
    fn test_integer_keys() {
        let root = Builder::new().prefix("test_integer_keys").tempdir().expect("tempdir");
        fs::create_dir_all(root.path()).expect("dir created");

        let k = Rkv::new::<backend::Lmdb>(root.path()).expect("new succeeded");
        let s = k.open_integer("s", StoreOptions::create()).expect("open");

        // Local macro so the same write/read round-trip can be checked for
        // multiple key values.
        macro_rules! test_integer_keys {
            ($type:ty, $key:expr) => {{
                let mut writer = k.write().expect("writer");

                s.put(&mut writer, $key, &Value::Str("hello!")).expect("write");
                assert_eq!(s.get(&writer, $key).expect("read"), Some(Value::Str("hello!")));
                writer.commit().expect("committed");

                let reader = k.read().expect("reader");
                assert_eq!(s.get(&reader, $key).expect("read"), Some(Value::Str("hello!")));
            }};
        }

        test_integer_keys!(u32, std::u32::MIN);
        test_integer_keys!(u32, std::u32::MAX);
    }
|
||||
|
||||
#[test]
|
||||
fn test_clear() {
|
||||
let root = Builder::new().prefix("test_integer_clear").tempdir().expect("tempdir");
|
||||
fs::create_dir_all(root.path()).expect("dir created");
|
||||
|
||||
let k = Rkv::new::<backend::Lmdb>(root.path()).expect("new succeeded");
|
||||
let s = k.open_integer("s", StoreOptions::create()).expect("open");
|
||||
|
||||
{
|
||||
let mut writer = k.write().expect("writer");
|
||||
s.put(&mut writer, 1, &Value::Str("hello!")).expect("write");
|
||||
s.put(&mut writer, 2, &Value::Str("hello!")).expect("write");
|
||||
s.put(&mut writer, 3, &Value::Str("hello!")).expect("write");
|
||||
assert_eq!(s.get(&writer, 1).expect("read"), Some(Value::Str("hello!")));
|
||||
assert_eq!(s.get(&writer, 2).expect("read"), Some(Value::Str("hello!")));
|
||||
assert_eq!(s.get(&writer, 3).expect("read"), Some(Value::Str("hello!")));
|
||||
writer.commit().expect("committed");
|
||||
}
|
||||
|
||||
{
|
||||
let mut writer = k.write().expect("writer");
|
||||
s.clear(&mut writer).expect("cleared");
|
||||
writer.commit().expect("committed");
|
||||
|
||||
let reader = k.read().expect("reader");
|
||||
assert_eq!(s.get(&reader, 1).expect("read"), None);
|
||||
assert_eq!(s.get(&reader, 2).expect("read"), None);
|
||||
assert_eq!(s.get(&reader, 3).expect("read"), None);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_dup() {
|
||||
let root = Builder::new().prefix("test_integer_dup").tempdir().expect("tempdir");
|
||||
fs::create_dir_all(root.path()).expect("dir created");
|
||||
|
||||
let k = Rkv::new::<backend::Lmdb>(root.path()).expect("new succeeded");
|
||||
let s = k.open_integer("s", StoreOptions::create()).expect("open");
|
||||
|
||||
{
|
||||
let mut writer = k.write().expect("writer");
|
||||
s.put(&mut writer, 1, &Value::Str("hello!")).expect("write");
|
||||
s.put(&mut writer, 1, &Value::Str("foo!")).expect("write");
|
||||
s.put(&mut writer, 1, &Value::Str("bar!")).expect("write");
|
||||
assert_eq!(s.get(&writer, 1).expect("read"), Some(Value::Str("bar!")));
|
||||
assert_eq!(s.get(&writer, 2).expect("read"), None);
|
||||
assert_eq!(s.get(&writer, 3).expect("read"), None);
|
||||
writer.commit().expect("committed");
|
||||
}
|
||||
|
||||
{
|
||||
let mut writer = k.write().expect("writer");
|
||||
s.clear(&mut writer).expect("cleared");
|
||||
writer.commit().expect("committed");
|
||||
|
||||
let reader = k.read().expect("reader");
|
||||
assert_eq!(s.get(&reader, 1).expect("read"), None);
|
||||
assert_eq!(s.get(&reader, 2).expect("read"), None);
|
||||
assert_eq!(s.get(&reader, 3).expect("read"), None);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_del() {
|
||||
let root = Builder::new().prefix("test_integer_del").tempdir().expect("tempdir");
|
||||
fs::create_dir_all(root.path()).expect("dir created");
|
||||
|
||||
let k = Rkv::new::<backend::Lmdb>(root.path()).expect("new succeeded");
|
||||
let s = k.open_integer("s", StoreOptions::create()).expect("open");
|
||||
|
||||
{
|
||||
let mut writer = k.write().expect("writer");
|
||||
s.put(&mut writer, 1, &Value::Str("hello!")).expect("write");
|
||||
s.put(&mut writer, 1, &Value::Str("foo!")).expect("write");
|
||||
s.put(&mut writer, 1, &Value::Str("bar!")).expect("write");
|
||||
assert_eq!(s.get(&writer, 1).expect("read"), Some(Value::Str("bar!")));
|
||||
assert_eq!(s.get(&writer, 2).expect("read"), None);
|
||||
assert_eq!(s.get(&writer, 3).expect("read"), None);
|
||||
writer.commit().expect("committed");
|
||||
}
|
||||
|
||||
{
|
||||
let mut writer = k.write().expect("writer");
|
||||
s.delete(&mut writer, 1).expect("deleted");
|
||||
writer.commit().expect("committed");
|
||||
|
||||
let reader = k.read().expect("reader");
|
||||
assert_eq!(s.get(&reader, 1).expect("read"), None);
|
||||
}
|
||||
|
||||
{
|
||||
let mut writer = k.write().expect("writer");
|
||||
s.delete(&mut writer, 2).expect_err("not deleted");
|
||||
writer.commit().expect("committed");
|
||||
|
||||
let reader = k.read().expect("reader");
|
||||
assert_eq!(s.get(&reader, 2).expect("read"), None);
|
||||
}
|
||||
|
||||
{
|
||||
let mut writer = k.write().expect("writer");
|
||||
s.delete(&mut writer, 3).expect_err("not deleted");
|
||||
writer.commit().expect("committed");
|
||||
|
||||
let reader = k.read().expect("reader");
|
||||
assert_eq!(s.get(&reader, 3).expect("read"), None);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_persist() {
|
||||
let root = Builder::new().prefix("test_integer_persist").tempdir().expect("tempdir");
|
||||
fs::create_dir_all(root.path()).expect("dir created");
|
||||
|
||||
{
|
||||
let k = Rkv::new::<backend::Lmdb>(root.path()).expect("new succeeded");
|
||||
let s = k.open_integer("s", StoreOptions::create()).expect("open");
|
||||
|
||||
let mut writer = k.write().expect("writer");
|
||||
s.put(&mut writer, 1, &Value::Str("hello!")).expect("write");
|
||||
s.put(&mut writer, 2, &Value::Str("hello!")).expect("write");
|
||||
s.put(&mut writer, 3, &Value::Str("hello!")).expect("write");
|
||||
assert_eq!(s.get(&writer, 1).expect("read"), Some(Value::Str("hello!")));
|
||||
assert_eq!(s.get(&writer, 2).expect("read"), Some(Value::Str("hello!")));
|
||||
assert_eq!(s.get(&writer, 3).expect("read"), Some(Value::Str("hello!")));
|
||||
writer.commit().expect("committed");
|
||||
}
|
||||
|
||||
{
|
||||
let k = Rkv::new::<backend::Lmdb>(root.path()).expect("new succeeded");
|
||||
let s = k.open_integer("s", StoreOptions::create()).expect("open");
|
||||
|
||||
let reader = k.read().expect("reader");
|
||||
assert_eq!(s.get(&reader, 1).expect("read"), Some(Value::Str("hello!")));
|
||||
assert_eq!(s.get(&reader, 2).expect("read"), Some(Value::Str("hello!")));
|
||||
assert_eq!(s.get(&reader, 3).expect("read"), Some(Value::Str("hello!")));
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_intertwine_read_write() {
|
||||
let root = Builder::new().prefix("test_integer_intertwine_read_write").tempdir().expect("tempdir");
|
||||
fs::create_dir_all(root.path()).expect("dir created");
|
||||
|
||||
let k = Rkv::new::<backend::Lmdb>(root.path()).expect("new succeeded");
|
||||
let s = k.open_integer("s", StoreOptions::create()).expect("open");
|
||||
|
||||
{
|
||||
let mut writer = k.write().expect("writer");
|
||||
s.put(&mut writer, 1, &Value::Str("hello!")).expect("write");
|
||||
assert_eq!(s.get(&writer, 1).expect("read"), Some(Value::Str("hello!")));
|
||||
assert_eq!(s.get(&writer, 2).expect("read"), None);
|
||||
assert_eq!(s.get(&writer, 3).expect("read"), None);
|
||||
writer.commit().expect("committed");
|
||||
}
|
||||
|
||||
let reader = k.read().expect("reader");
|
||||
let mut writer = k.write().expect("writer");
|
||||
|
||||
{
|
||||
assert_eq!(s.get(&reader, 1).expect("read"), Some(Value::Str("hello!")));
|
||||
assert_eq!(s.get(&reader, 2).expect("read"), None);
|
||||
assert_eq!(s.get(&reader, 3).expect("read"), None);
|
||||
}
|
||||
|
||||
{
|
||||
s.put(&mut writer, 1, &Value::Str("goodbye!")).expect("write");
|
||||
s.put(&mut writer, 2, &Value::Str("goodbye!")).expect("write");
|
||||
s.put(&mut writer, 3, &Value::Str("goodbye!")).expect("write");
|
||||
assert_eq!(s.get(&writer, 1).expect("read"), Some(Value::Str("goodbye!")));
|
||||
assert_eq!(s.get(&writer, 2).expect("read"), Some(Value::Str("goodbye!")));
|
||||
assert_eq!(s.get(&writer, 3).expect("read"), Some(Value::Str("goodbye!")));
|
||||
writer.commit().expect("committed");
|
||||
}
|
||||
|
||||
{
|
||||
assert_eq!(s.get(&reader, 1).expect("read"), Some(Value::Str("hello!")));
|
||||
assert_eq!(s.get(&reader, 2).expect("read"), None);
|
||||
assert_eq!(s.get(&reader, 3).expect("read"), None);
|
||||
}
|
||||
|
||||
{
|
||||
let mut writer = k.write().expect("writer");
|
||||
s.put(&mut writer, 1, &Value::Str("hello!")).expect("write");
|
||||
assert_eq!(s.get(&writer, 1).expect("read"), Some(Value::Str("hello!")));
|
||||
assert_eq!(s.get(&writer, 2).expect("read"), Some(Value::Str("goodbye!")));
|
||||
assert_eq!(s.get(&writer, 3).expect("read"), Some(Value::Str("goodbye!")));
|
||||
writer.commit().expect("committed");
|
||||
}
|
||||
|
||||
{
|
||||
let reader = k.write().expect("reader");
|
||||
assert_eq!(s.get(&reader, 1).expect("read"), Some(Value::Str("hello!")));
|
||||
assert_eq!(s.get(&reader, 2).expect("read"), Some(Value::Str("goodbye!")));
|
||||
assert_eq!(s.get(&reader, 3).expect("read"), Some(Value::Str("goodbye!")));
|
||||
reader.commit().expect("committed");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests_safe {
|
||||
use super::*;
|
||||
use crate::*;
|
||||
|
||||
use std::fs;
|
||||
|
||||
use tempfile::Builder;
|
||||
|
||||
#[test]
|
||||
fn test_integer_keys() {
|
||||
let root = Builder::new().prefix("test_integer_keys").tempdir().expect("tempdir");
|
||||
fs::create_dir_all(root.path()).expect("dir created");
|
||||
|
||||
let k = Rkv::new::<backend::SafeMode>(root.path()).expect("new succeeded");
|
||||
let s = k.open_integer("s", StoreOptions::create()).expect("open");
|
||||
|
||||
macro_rules! test_integer_keys {
|
||||
($type:ty, $key:expr) => {{
|
||||
let mut writer = k.write().expect("writer");
|
||||
|
||||
s.put(&mut writer, $key, &Value::Str("hello!")).expect("write");
|
||||
assert_eq!(s.get(&writer, $key).expect("read"), Some(Value::Str("hello!")));
|
||||
writer.commit().expect("committed");
|
||||
|
||||
let reader = k.read().expect("reader");
|
||||
assert_eq!(s.get(&reader, $key).expect("read"), Some(Value::Str("hello!")));
|
||||
}};
|
||||
}
|
||||
|
||||
test_integer_keys!(u32, std::u32::MIN);
|
||||
test_integer_keys!(u32, std::u32::MAX);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_clear() {
|
||||
let root = Builder::new().prefix("test_integer_clear").tempdir().expect("tempdir");
|
||||
fs::create_dir_all(root.path()).expect("dir created");
|
||||
|
||||
let k = Rkv::new::<backend::SafeMode>(root.path()).expect("new succeeded");
|
||||
let s = k.open_integer("s", StoreOptions::create()).expect("open");
|
||||
|
||||
{
|
||||
let mut writer = k.write().expect("writer");
|
||||
s.put(&mut writer, 1, &Value::Str("hello!")).expect("write");
|
||||
s.put(&mut writer, 2, &Value::Str("hello!")).expect("write");
|
||||
s.put(&mut writer, 3, &Value::Str("hello!")).expect("write");
|
||||
assert_eq!(s.get(&writer, 1).expect("read"), Some(Value::Str("hello!")));
|
||||
assert_eq!(s.get(&writer, 2).expect("read"), Some(Value::Str("hello!")));
|
||||
assert_eq!(s.get(&writer, 3).expect("read"), Some(Value::Str("hello!")));
|
||||
writer.commit().expect("committed");
|
||||
}
|
||||
|
||||
{
|
||||
let mut writer = k.write().expect("writer");
|
||||
s.clear(&mut writer).expect("cleared");
|
||||
writer.commit().expect("committed");
|
||||
|
||||
let reader = k.read().expect("reader");
|
||||
assert_eq!(s.get(&reader, 1).expect("read"), None);
|
||||
assert_eq!(s.get(&reader, 2).expect("read"), None);
|
||||
assert_eq!(s.get(&reader, 3).expect("read"), None);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_dup() {
|
||||
let root = Builder::new().prefix("test_integer_dup").tempdir().expect("tempdir");
|
||||
fs::create_dir_all(root.path()).expect("dir created");
|
||||
|
||||
let k = Rkv::new::<backend::SafeMode>(root.path()).expect("new succeeded");
|
||||
let s = k.open_integer("s", StoreOptions::create()).expect("open");
|
||||
|
||||
{
|
||||
let mut writer = k.write().expect("writer");
|
||||
s.put(&mut writer, 1, &Value::Str("hello!")).expect("write");
|
||||
s.put(&mut writer, 1, &Value::Str("foo!")).expect("write");
|
||||
s.put(&mut writer, 1, &Value::Str("bar!")).expect("write");
|
||||
assert_eq!(s.get(&writer, 1).expect("read"), Some(Value::Str("bar!")));
|
||||
assert_eq!(s.get(&writer, 2).expect("read"), None);
|
||||
assert_eq!(s.get(&writer, 3).expect("read"), None);
|
||||
writer.commit().expect("committed");
|
||||
}
|
||||
|
||||
{
|
||||
let mut writer = k.write().expect("writer");
|
||||
s.clear(&mut writer).expect("cleared");
|
||||
writer.commit().expect("committed");
|
||||
|
||||
let reader = k.read().expect("reader");
|
||||
assert_eq!(s.get(&reader, 1).expect("read"), None);
|
||||
assert_eq!(s.get(&reader, 2).expect("read"), None);
|
||||
assert_eq!(s.get(&reader, 3).expect("read"), None);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_del() {
|
||||
let root = Builder::new().prefix("test_integer_del").tempdir().expect("tempdir");
|
||||
fs::create_dir_all(root.path()).expect("dir created");
|
||||
|
||||
let k = Rkv::new::<backend::SafeMode>(root.path()).expect("new succeeded");
|
||||
let s = k.open_integer("s", StoreOptions::create()).expect("open");
|
||||
|
||||
{
|
||||
let mut writer = k.write().expect("writer");
|
||||
s.put(&mut writer, 1, &Value::Str("hello!")).expect("write");
|
||||
s.put(&mut writer, 1, &Value::Str("foo!")).expect("write");
|
||||
s.put(&mut writer, 1, &Value::Str("bar!")).expect("write");
|
||||
assert_eq!(s.get(&writer, 1).expect("read"), Some(Value::Str("bar!")));
|
||||
assert_eq!(s.get(&writer, 2).expect("read"), None);
|
||||
assert_eq!(s.get(&writer, 3).expect("read"), None);
|
||||
writer.commit().expect("committed");
|
||||
}
|
||||
|
||||
{
|
||||
let mut writer = k.write().expect("writer");
|
||||
s.delete(&mut writer, 1).expect("deleted");
|
||||
writer.commit().expect("committed");
|
||||
|
||||
let reader = k.read().expect("reader");
|
||||
assert_eq!(s.get(&reader, 1).expect("read"), None);
|
||||
}
|
||||
|
||||
{
|
||||
let mut writer = k.write().expect("writer");
|
||||
s.delete(&mut writer, 2).expect_err("not deleted");
|
||||
writer.commit().expect("committed");
|
||||
|
||||
let reader = k.read().expect("reader");
|
||||
assert_eq!(s.get(&reader, 2).expect("read"), None);
|
||||
}
|
||||
|
||||
{
|
||||
let mut writer = k.write().expect("writer");
|
||||
s.delete(&mut writer, 3).expect_err("not deleted");
|
||||
writer.commit().expect("committed");
|
||||
|
||||
let reader = k.read().expect("reader");
|
||||
assert_eq!(s.get(&reader, 3).expect("read"), None);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_persist() {
|
||||
let root = Builder::new().prefix("test_integer_persist").tempdir().expect("tempdir");
|
||||
fs::create_dir_all(root.path()).expect("dir created");
|
||||
|
||||
{
|
||||
let k = Rkv::new::<backend::SafeMode>(root.path()).expect("new succeeded");
|
||||
let s = k.open_integer("s", StoreOptions::create()).expect("open");
|
||||
|
||||
let mut writer = k.write().expect("writer");
|
||||
s.put(&mut writer, 1, &Value::Str("hello!")).expect("write");
|
||||
s.put(&mut writer, 2, &Value::Str("hello!")).expect("write");
|
||||
s.put(&mut writer, 3, &Value::Str("hello!")).expect("write");
|
||||
assert_eq!(s.get(&writer, 1).expect("read"), Some(Value::Str("hello!")));
|
||||
assert_eq!(s.get(&writer, 2).expect("read"), Some(Value::Str("hello!")));
|
||||
assert_eq!(s.get(&writer, 3).expect("read"), Some(Value::Str("hello!")));
|
||||
writer.commit().expect("committed");
|
||||
}
|
||||
|
||||
{
|
||||
let k = Rkv::new::<backend::SafeMode>(root.path()).expect("new succeeded");
|
||||
let s = k.open_integer("s", StoreOptions::create()).expect("open");
|
||||
|
||||
let reader = k.read().expect("reader");
|
||||
assert_eq!(s.get(&reader, 1).expect("read"), Some(Value::Str("hello!")));
|
||||
assert_eq!(s.get(&reader, 2).expect("read"), Some(Value::Str("hello!")));
|
||||
assert_eq!(s.get(&reader, 3).expect("read"), Some(Value::Str("hello!")));
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_intertwine_read_write() {
|
||||
let root = Builder::new().prefix("test_integer_intertwine_read_write").tempdir().expect("tempdir");
|
||||
fs::create_dir_all(root.path()).expect("dir created");
|
||||
|
||||
let k = Rkv::new::<backend::SafeMode>(root.path()).expect("new succeeded");
|
||||
let s = k.open_integer("s", StoreOptions::create()).expect("open");
|
||||
|
||||
{
|
||||
let mut writer = k.write().expect("writer");
|
||||
s.put(&mut writer, 1, &Value::Str("hello!")).expect("write");
|
||||
assert_eq!(s.get(&writer, 1).expect("read"), Some(Value::Str("hello!")));
|
||||
assert_eq!(s.get(&writer, 2).expect("read"), None);
|
||||
assert_eq!(s.get(&writer, 3).expect("read"), None);
|
||||
writer.commit().expect("committed");
|
||||
}
|
||||
|
||||
let reader = k.read().expect("reader");
|
||||
let mut writer = k.write().expect("writer");
|
||||
|
||||
{
|
||||
assert_eq!(s.get(&reader, 1).expect("read"), Some(Value::Str("hello!")));
|
||||
assert_eq!(s.get(&reader, 2).expect("read"), None);
|
||||
assert_eq!(s.get(&reader, 3).expect("read"), None);
|
||||
}
|
||||
|
||||
{
|
||||
s.put(&mut writer, 1, &Value::Str("goodbye!")).expect("write");
|
||||
s.put(&mut writer, 2, &Value::Str("goodbye!")).expect("write");
|
||||
s.put(&mut writer, 3, &Value::Str("goodbye!")).expect("write");
|
||||
assert_eq!(s.get(&writer, 1).expect("read"), Some(Value::Str("goodbye!")));
|
||||
assert_eq!(s.get(&writer, 2).expect("read"), Some(Value::Str("goodbye!")));
|
||||
assert_eq!(s.get(&writer, 3).expect("read"), Some(Value::Str("goodbye!")));
|
||||
writer.commit().expect("committed");
|
||||
}
|
||||
|
||||
{
|
||||
assert_eq!(s.get(&reader, 1).expect("read"), Some(Value::Str("hello!")));
|
||||
assert_eq!(s.get(&reader, 2).expect("read"), None);
|
||||
assert_eq!(s.get(&reader, 3).expect("read"), None);
|
||||
}
|
||||
|
||||
{
|
||||
let mut writer = k.write().expect("writer");
|
||||
s.put(&mut writer, 1, &Value::Str("hello!")).expect("write");
|
||||
assert_eq!(s.get(&writer, 1).expect("read"), Some(Value::Str("hello!")));
|
||||
assert_eq!(s.get(&writer, 2).expect("read"), Some(Value::Str("goodbye!")));
|
||||
assert_eq!(s.get(&writer, 3).expect("read"), Some(Value::Str("goodbye!")));
|
||||
writer.commit().expect("committed");
|
||||
}
|
||||
|
||||
{
|
||||
let reader = k.write().expect("reader");
|
||||
assert_eq!(s.get(&reader, 1).expect("read"), Some(Value::Str("hello!")));
|
||||
assert_eq!(s.get(&reader, 2).expect("read"), Some(Value::Str("goodbye!")));
|
||||
assert_eq!(s.get(&reader, 3).expect("read"), Some(Value::Str("goodbye!")));
|
||||
reader.commit().expect("committed");
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,543 @@
|
|||
// Copyright 2018 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
use std::marker::PhantomData;
|
||||
|
||||
use crate::{
|
||||
backend::{
|
||||
BackendDatabase,
|
||||
BackendIter,
|
||||
BackendRoCursor,
|
||||
BackendRwTransaction,
|
||||
},
|
||||
error::StoreError,
|
||||
readwrite::{
|
||||
Readable,
|
||||
Writer,
|
||||
},
|
||||
store::{
|
||||
keys::{
|
||||
Key,
|
||||
PrimitiveInt,
|
||||
},
|
||||
multi::{
|
||||
Iter,
|
||||
MultiStore,
|
||||
},
|
||||
},
|
||||
value::Value,
|
||||
};
|
||||
|
||||
type EmptyResult = Result<(), StoreError>;
|
||||
|
||||
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
|
||||
pub struct MultiIntegerStore<D, K> {
|
||||
inner: MultiStore<D>,
|
||||
phantom: PhantomData<K>,
|
||||
}
|
||||
|
||||
impl<D, K> MultiIntegerStore<D, K>
|
||||
where
|
||||
D: BackendDatabase,
|
||||
K: PrimitiveInt,
|
||||
{
|
||||
pub(crate) fn new(db: D) -> MultiIntegerStore<D, K> {
|
||||
MultiIntegerStore {
|
||||
inner: MultiStore::new(db),
|
||||
phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get<'r, R, I, C>(&self, reader: &'r R, k: K) -> Result<Iter<'r, I>, StoreError>
|
||||
where
|
||||
R: Readable<'r, Database = D, RoCursor = C>,
|
||||
I: BackendIter<'r>,
|
||||
C: BackendRoCursor<'r, Iter = I>,
|
||||
K: 'r,
|
||||
{
|
||||
self.inner.get(reader, Key::new(&k)?)
|
||||
}
|
||||
|
||||
pub fn get_first<'r, R>(&self, reader: &'r R, k: K) -> Result<Option<Value<'r>>, StoreError>
|
||||
where
|
||||
R: Readable<'r, Database = D>,
|
||||
{
|
||||
self.inner.get_first(reader, Key::new(&k)?)
|
||||
}
|
||||
|
||||
pub fn put<T>(&self, writer: &mut Writer<T>, k: K, v: &Value) -> EmptyResult
|
||||
where
|
||||
T: BackendRwTransaction<Database = D>,
|
||||
{
|
||||
self.inner.put(writer, Key::new(&k)?, v)
|
||||
}
|
||||
|
||||
pub fn put_with_flags<T>(&self, writer: &mut Writer<T>, k: K, v: &Value, flags: T::Flags) -> EmptyResult
|
||||
where
|
||||
T: BackendRwTransaction<Database = D>,
|
||||
{
|
||||
self.inner.put_with_flags(writer, Key::new(&k)?, v, flags)
|
||||
}
|
||||
|
||||
pub fn delete_all<T>(&self, writer: &mut Writer<T>, k: K) -> EmptyResult
|
||||
where
|
||||
T: BackendRwTransaction<Database = D>,
|
||||
{
|
||||
self.inner.delete_all(writer, Key::new(&k)?)
|
||||
}
|
||||
|
||||
pub fn delete<T>(&self, writer: &mut Writer<T>, k: K, v: &Value) -> EmptyResult
|
||||
where
|
||||
T: BackendRwTransaction<Database = D>,
|
||||
{
|
||||
self.inner.delete(writer, Key::new(&k)?, v)
|
||||
}
|
||||
|
||||
pub fn clear<T>(&self, writer: &mut Writer<T>) -> EmptyResult
|
||||
where
|
||||
T: BackendRwTransaction<Database = D>,
|
||||
{
|
||||
self.inner.clear(writer)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::*;
|
||||
|
||||
use std::fs;
|
||||
|
||||
use tempfile::Builder;
|
||||
|
||||
#[test]
|
||||
fn test_integer_keys() {
|
||||
let root = Builder::new().prefix("test_integer_keys").tempdir().expect("tempdir");
|
||||
fs::create_dir_all(root.path()).expect("dir created");
|
||||
|
||||
let k = Rkv::new::<backend::Lmdb>(root.path()).expect("new succeeded");
|
||||
let s = k.open_multi_integer("s", StoreOptions::create()).expect("open");
|
||||
|
||||
macro_rules! test_integer_keys {
|
||||
($type:ty, $key:expr) => {{
|
||||
let mut writer = k.write().expect("writer");
|
||||
|
||||
s.put(&mut writer, $key, &Value::Str("hello!")).expect("write");
|
||||
assert_eq!(s.get_first(&writer, $key).expect("read"), Some(Value::Str("hello!")));
|
||||
writer.commit().expect("committed");
|
||||
|
||||
let reader = k.read().expect("reader");
|
||||
assert_eq!(s.get_first(&reader, $key).expect("read"), Some(Value::Str("hello!")));
|
||||
}};
|
||||
}
|
||||
|
||||
test_integer_keys!(u32, std::u32::MIN);
|
||||
test_integer_keys!(u32, std::u32::MAX);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_clear() {
|
||||
let root = Builder::new().prefix("test_multi_integer_clear").tempdir().expect("tempdir");
|
||||
fs::create_dir_all(root.path()).expect("dir created");
|
||||
|
||||
let k = Rkv::new::<backend::Lmdb>(root.path()).expect("new succeeded");
|
||||
let s = k.open_multi_integer("s", StoreOptions::create()).expect("open");
|
||||
|
||||
{
|
||||
let mut writer = k.write().expect("writer");
|
||||
s.put(&mut writer, 1, &Value::Str("hello!")).expect("write");
|
||||
s.put(&mut writer, 1, &Value::Str("hello1!")).expect("write");
|
||||
s.put(&mut writer, 2, &Value::Str("hello!")).expect("write");
|
||||
assert_eq!(s.get_first(&writer, 1).expect("read"), Some(Value::Str("hello!")));
|
||||
assert_eq!(s.get_first(&writer, 2).expect("read"), Some(Value::Str("hello!")));
|
||||
assert_eq!(s.get_first(&writer, 3).expect("read"), None);
|
||||
writer.commit().expect("committed");
|
||||
}
|
||||
|
||||
{
|
||||
let mut writer = k.write().expect("writer");
|
||||
s.clear(&mut writer).expect("cleared");
|
||||
writer.commit().expect("committed");
|
||||
|
||||
let reader = k.read().expect("reader");
|
||||
assert_eq!(s.get_first(&reader, 1).expect("read"), None);
|
||||
assert_eq!(s.get_first(&reader, 2).expect("read"), None);
|
||||
assert_eq!(s.get_first(&reader, 3).expect("read"), None);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_dup() {
|
||||
let root = Builder::new().prefix("test_multi_integer_dup").tempdir().expect("tempdir");
|
||||
fs::create_dir_all(root.path()).expect("dir created");
|
||||
|
||||
let k = Rkv::new::<backend::Lmdb>(root.path()).expect("new succeeded");
|
||||
let s = k.open_multi_integer("s", StoreOptions::create()).expect("open");
|
||||
|
||||
{
|
||||
let mut writer = k.write().expect("writer");
|
||||
s.put(&mut writer, 1, &Value::Str("hello!")).expect("write");
|
||||
s.put(&mut writer, 1, &Value::Str("hello!")).expect("write");
|
||||
s.put(&mut writer, 1, &Value::Str("hello1!")).expect("write");
|
||||
assert_eq!(s.get_first(&writer, 1).expect("read"), Some(Value::Str("hello!")));
|
||||
assert_eq!(s.get_first(&writer, 2).expect("read"), None);
|
||||
assert_eq!(s.get_first(&writer, 3).expect("read"), None);
|
||||
writer.commit().expect("committed");
|
||||
}
|
||||
|
||||
{
|
||||
let mut writer = k.write().expect("writer");
|
||||
s.clear(&mut writer).expect("cleared");
|
||||
writer.commit().expect("committed");
|
||||
|
||||
let reader = k.read().expect("reader");
|
||||
assert_eq!(s.get_first(&reader, 1).expect("read"), None);
|
||||
assert_eq!(s.get_first(&reader, 2).expect("read"), None);
|
||||
assert_eq!(s.get_first(&reader, 3).expect("read"), None);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_dup_2() {
|
||||
let root = Builder::new().prefix("test_multi_integer_dup").tempdir().expect("tempdir");
|
||||
fs::create_dir_all(root.path()).expect("dir created");
|
||||
|
||||
let k = Rkv::new::<backend::Lmdb>(root.path()).expect("new succeeded");
|
||||
let s = k.open_multi_integer("s", StoreOptions::create()).expect("open");
|
||||
|
||||
{
|
||||
let mut writer = k.write().expect("writer");
|
||||
s.put(&mut writer, 1, &Value::Str("hello!")).expect("write");
|
||||
s.put(&mut writer, 1, &Value::Str("hello!")).expect("write");
|
||||
s.put(&mut writer, 1, &Value::Str("hello1!")).expect("write");
|
||||
|
||||
let mut iter = s.get(&writer, 1).expect("read");
|
||||
assert_eq!(iter.next().expect("first").expect("ok").1, Value::Str("hello!"));
|
||||
assert_eq!(iter.next().expect("second").expect("ok").1, Value::Str("hello1!"));
|
||||
assert!(iter.next().is_none());
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_del() {
|
||||
let root = Builder::new().prefix("test_multi_integer_dup").tempdir().expect("tempdir");
|
||||
fs::create_dir_all(root.path()).expect("dir created");
|
||||
|
||||
let k = Rkv::new::<backend::Lmdb>(root.path()).expect("new succeeded");
|
||||
let s = k.open_multi_integer("s", StoreOptions::create()).expect("open");
|
||||
|
||||
{
|
||||
let mut writer = k.write().expect("writer");
|
||||
s.put(&mut writer, 1, &Value::Str("hello!")).expect("write");
|
||||
s.put(&mut writer, 1, &Value::Str("hello!")).expect("write");
|
||||
s.put(&mut writer, 1, &Value::Str("hello1!")).expect("write");
|
||||
{
|
||||
let mut iter = s.get(&writer, 1).expect("read");
|
||||
assert_eq!(iter.next().expect("first").expect("ok").1, Value::Str("hello!"));
|
||||
assert_eq!(iter.next().expect("second").expect("ok").1, Value::Str("hello1!"));
|
||||
assert!(iter.next().is_none());
|
||||
}
|
||||
writer.commit().expect("committed");
|
||||
}
|
||||
|
||||
{
|
||||
let mut writer = k.write().expect("writer");
|
||||
s.delete(&mut writer, 1, &Value::Str("hello!")).expect("deleted");
|
||||
writer.commit().expect("committed");
|
||||
|
||||
let reader = k.read().expect("reader");
|
||||
let mut iter = s.get(&reader, 1).expect("read");
|
||||
assert_eq!(iter.next().expect("first").expect("ok").1, Value::Str("hello1!"));
|
||||
assert!(iter.next().is_none());
|
||||
}
|
||||
|
||||
{
|
||||
let mut writer = k.write().expect("writer");
|
||||
s.delete(&mut writer, 1, &Value::Str("hello!")).expect_err("deleted");
|
||||
writer.commit().expect("committed");
|
||||
|
||||
let reader = k.read().expect("reader");
|
||||
let mut iter = s.get(&reader, 1).expect("read");
|
||||
assert_eq!(iter.next().expect("first").expect("ok").1, Value::Str("hello1!"));
|
||||
assert!(iter.next().is_none());
|
||||
}
|
||||
|
||||
{
|
||||
let mut writer = k.write().expect("writer");
|
||||
s.delete(&mut writer, 1, &Value::Str("hello1!")).expect("deleted");
|
||||
writer.commit().expect("committed");
|
||||
|
||||
let reader = k.read().expect("reader");
|
||||
let mut iter = s.get(&reader, 1).expect("read");
|
||||
assert!(iter.next().is_none());
|
||||
}
|
||||
|
||||
{
|
||||
let mut writer = k.write().expect("writer");
|
||||
s.delete(&mut writer, 1, &Value::Str("hello1!")).expect_err("deleted");
|
||||
writer.commit().expect("committed");
|
||||
|
||||
let reader = k.read().expect("reader");
|
||||
let mut iter = s.get(&reader, 1).expect("read");
|
||||
assert!(iter.next().is_none());
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_persist() {
|
||||
let root = Builder::new().prefix("test_multi_integer_persist").tempdir().expect("tempdir");
|
||||
fs::create_dir_all(root.path()).expect("dir created");
|
||||
|
||||
{
|
||||
let k = Rkv::new::<backend::Lmdb>(root.path()).expect("new succeeded");
|
||||
let s = k.open_multi_integer("s", StoreOptions::create()).expect("open");
|
||||
|
||||
let mut writer = k.write().expect("writer");
|
||||
s.put(&mut writer, 1, &Value::Str("hello!")).expect("write");
|
||||
s.put(&mut writer, 1, &Value::Str("hello1!")).expect("write");
|
||||
s.put(&mut writer, 2, &Value::Str("hello!")).expect("write");
|
||||
{
|
||||
let mut iter = s.get(&writer, 1).expect("read");
|
||||
assert_eq!(iter.next().expect("first").expect("ok").1, Value::Str("hello!"));
|
||||
assert_eq!(iter.next().expect("second").expect("ok").1, Value::Str("hello1!"));
|
||||
assert!(iter.next().is_none());
|
||||
}
|
||||
writer.commit().expect("committed");
|
||||
}
|
||||
|
||||
{
|
||||
let k = Rkv::new::<backend::Lmdb>(root.path()).expect("new succeeded");
|
||||
let s = k.open_multi_integer("s", StoreOptions::create()).expect("open");
|
||||
|
||||
let reader = k.read().expect("reader");
|
||||
let mut iter = s.get(&reader, 1).expect("read");
|
||||
assert_eq!(iter.next().expect("first").expect("ok").1, Value::Str("hello!"));
|
||||
assert_eq!(iter.next().expect("second").expect("ok").1, Value::Str("hello1!"));
|
||||
assert!(iter.next().is_none());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Tests exercising multi-value integer-keyed stores against the SafeMode
// (pure-Rust, non-LMDB) backend. These mirror the LMDB-backend tests above.
#[cfg(test)]
mod tests_safe {
    use super::*;
    use crate::*;

    use std::fs;

    use tempfile::Builder;

    #[test]
    fn test_integer_keys() {
        let root = Builder::new().prefix("test_integer_keys").tempdir().expect("tempdir");
        fs::create_dir_all(root.path()).expect("dir created");

        let k = Rkv::new::<backend::SafeMode>(root.path()).expect("new succeeded");
        let s = k.open_multi_integer("s", StoreOptions::create()).expect("open");

        // Round-trips a write under `$key`, checking visibility both within
        // the writing transaction and from a fresh reader after commit.
        macro_rules! test_integer_keys {
            ($type:ty, $key:expr) => {{
                let mut writer = k.write().expect("writer");

                s.put(&mut writer, $key, &Value::Str("hello!")).expect("write");
                assert_eq!(s.get_first(&writer, $key).expect("read"), Some(Value::Str("hello!")));
                writer.commit().expect("committed");

                let reader = k.read().expect("reader");
                assert_eq!(s.get_first(&reader, $key).expect("read"), Some(Value::Str("hello!")));
            }};
        }

        // Exercise both extremes of the supported integer key range.
        test_integer_keys!(u32, std::u32::MIN);
        test_integer_keys!(u32, std::u32::MAX);
    }

    #[test]
    fn test_clear() {
        let root = Builder::new().prefix("test_multi_integer_clear").tempdir().expect("tempdir");
        fs::create_dir_all(root.path()).expect("dir created");

        let k = Rkv::new::<backend::SafeMode>(root.path()).expect("new succeeded");
        let s = k.open_multi_integer("s", StoreOptions::create()).expect("open");

        // Populate two keys, one of them with duplicate entries.
        {
            let mut writer = k.write().expect("writer");
            s.put(&mut writer, 1, &Value::Str("hello!")).expect("write");
            s.put(&mut writer, 1, &Value::Str("hello1!")).expect("write");
            s.put(&mut writer, 2, &Value::Str("hello!")).expect("write");
            assert_eq!(s.get_first(&writer, 1).expect("read"), Some(Value::Str("hello!")));
            assert_eq!(s.get_first(&writer, 2).expect("read"), Some(Value::Str("hello!")));
            assert_eq!(s.get_first(&writer, 3).expect("read"), None);
            writer.commit().expect("committed");
        }

        // Clearing removes every key, including ones with duplicates.
        {
            let mut writer = k.write().expect("writer");
            s.clear(&mut writer).expect("cleared");
            writer.commit().expect("committed");

            let reader = k.read().expect("reader");
            assert_eq!(s.get_first(&reader, 1).expect("read"), None);
            assert_eq!(s.get_first(&reader, 2).expect("read"), None);
            assert_eq!(s.get_first(&reader, 3).expect("read"), None);
        }
    }

    #[test]
    fn test_dup() {
        let root = Builder::new().prefix("test_multi_integer_dup").tempdir().expect("tempdir");
        fs::create_dir_all(root.path()).expect("dir created");

        let k = Rkv::new::<backend::SafeMode>(root.path()).expect("new succeeded");
        let s = k.open_multi_integer("s", StoreOptions::create()).expect("open");

        // Writing the same (key, value) pair twice plus a distinct value;
        // `get_first` still returns the first stored value for the key.
        {
            let mut writer = k.write().expect("writer");
            s.put(&mut writer, 1, &Value::Str("hello!")).expect("write");
            s.put(&mut writer, 1, &Value::Str("hello!")).expect("write");
            s.put(&mut writer, 1, &Value::Str("hello1!")).expect("write");
            assert_eq!(s.get_first(&writer, 1).expect("read"), Some(Value::Str("hello!")));
            assert_eq!(s.get_first(&writer, 2).expect("read"), None);
            assert_eq!(s.get_first(&writer, 3).expect("read"), None);
            writer.commit().expect("committed");
        }

        // Clearing also removes duplicate entries for a key.
        {
            let mut writer = k.write().expect("writer");
            s.clear(&mut writer).expect("cleared");
            writer.commit().expect("committed");

            let reader = k.read().expect("reader");
            assert_eq!(s.get_first(&reader, 1).expect("read"), None);
            assert_eq!(s.get_first(&reader, 2).expect("read"), None);
            assert_eq!(s.get_first(&reader, 3).expect("read"), None);
        }
    }

    #[test]
    fn test_dup_2() {
        let root = Builder::new().prefix("test_multi_integer_dup").tempdir().expect("tempdir");
        fs::create_dir_all(root.path()).expect("dir created");

        let k = Rkv::new::<backend::SafeMode>(root.path()).expect("new succeeded");
        let s = k.open_multi_integer("s", StoreOptions::create()).expect("open");

        // Iterating a key's duplicates yields each distinct value once:
        // the repeated ("hello!") write is collapsed by dup-sort semantics.
        // The writer is dropped without commit, aborting the transaction.
        {
            let mut writer = k.write().expect("writer");
            s.put(&mut writer, 1, &Value::Str("hello!")).expect("write");
            s.put(&mut writer, 1, &Value::Str("hello!")).expect("write");
            s.put(&mut writer, 1, &Value::Str("hello1!")).expect("write");

            let mut iter = s.get(&writer, 1).expect("read");
            assert_eq!(iter.next().expect("first").expect("ok").1, Value::Str("hello!"));
            assert_eq!(iter.next().expect("second").expect("ok").1, Value::Str("hello1!"));
            assert!(iter.next().is_none());
        }
    }

    #[test]
    fn test_del() {
        let root = Builder::new().prefix("test_multi_integer_dup").tempdir().expect("tempdir");
        fs::create_dir_all(root.path()).expect("dir created");

        let k = Rkv::new::<backend::SafeMode>(root.path()).expect("new succeeded");
        let s = k.open_multi_integer("s", StoreOptions::create()).expect("open");

        // Seed key 1 with two distinct values (one written twice).
        {
            let mut writer = k.write().expect("writer");
            s.put(&mut writer, 1, &Value::Str("hello!")).expect("write");
            s.put(&mut writer, 1, &Value::Str("hello!")).expect("write");
            s.put(&mut writer, 1, &Value::Str("hello1!")).expect("write");
            {
                let mut iter = s.get(&writer, 1).expect("read");
                assert_eq!(iter.next().expect("first").expect("ok").1, Value::Str("hello!"));
                assert_eq!(iter.next().expect("second").expect("ok").1, Value::Str("hello1!"));
                assert!(iter.next().is_none());
            }
            writer.commit().expect("committed");
        }

        // Deleting a specific (key, value) pair removes only that value.
        {
            let mut writer = k.write().expect("writer");
            s.delete(&mut writer, 1, &Value::Str("hello!")).expect("deleted");
            writer.commit().expect("committed");

            let reader = k.read().expect("reader");
            let mut iter = s.get(&reader, 1).expect("read");
            assert_eq!(iter.next().expect("first").expect("ok").1, Value::Str("hello1!"));
            assert!(iter.next().is_none());
        }

        // Deleting an already-deleted pair is an error and leaves the rest intact.
        {
            let mut writer = k.write().expect("writer");
            s.delete(&mut writer, 1, &Value::Str("hello!")).expect_err("deleted");
            writer.commit().expect("committed");

            let reader = k.read().expect("reader");
            let mut iter = s.get(&reader, 1).expect("read");
            assert_eq!(iter.next().expect("first").expect("ok").1, Value::Str("hello1!"));
            assert!(iter.next().is_none());
        }

        // Deleting the remaining value empties the key.
        {
            let mut writer = k.write().expect("writer");
            s.delete(&mut writer, 1, &Value::Str("hello1!")).expect("deleted");
            writer.commit().expect("committed");

            let reader = k.read().expect("reader");
            let mut iter = s.get(&reader, 1).expect("read");
            assert!(iter.next().is_none());
        }

        // Deleting from an empty key is likewise an error.
        {
            let mut writer = k.write().expect("writer");
            s.delete(&mut writer, 1, &Value::Str("hello1!")).expect_err("deleted");
            writer.commit().expect("committed");

            let reader = k.read().expect("reader");
            let mut iter = s.get(&reader, 1).expect("read");
            assert!(iter.next().is_none());
        }
    }

    #[test]
    fn test_persist() {
        let root = Builder::new().prefix("test_multi_integer_persist").tempdir().expect("tempdir");
        fs::create_dir_all(root.path()).expect("dir created");

        // Write and commit under one environment instance...
        {
            let k = Rkv::new::<backend::SafeMode>(root.path()).expect("new succeeded");
            let s = k.open_multi_integer("s", StoreOptions::create()).expect("open");

            let mut writer = k.write().expect("writer");
            s.put(&mut writer, 1, &Value::Str("hello!")).expect("write");
            s.put(&mut writer, 1, &Value::Str("hello1!")).expect("write");
            s.put(&mut writer, 2, &Value::Str("hello!")).expect("write");
            {
                let mut iter = s.get(&writer, 1).expect("read");
                assert_eq!(iter.next().expect("first").expect("ok").1, Value::Str("hello!"));
                assert_eq!(iter.next().expect("second").expect("ok").1, Value::Str("hello1!"));
                assert!(iter.next().is_none());
            }
            writer.commit().expect("committed");
        }

        // ...then reopen from disk and verify the data survived.
        {
            let k = Rkv::new::<backend::SafeMode>(root.path()).expect("new succeeded");
            let s = k.open_multi_integer("s", StoreOptions::create()).expect("open");

            let reader = k.read().expect("reader");
            let mut iter = s.get(&reader, 1).expect("read");
            assert_eq!(iter.next().expect("first").expect("ok").1, Value::Str("hello!"));
            assert_eq!(iter.next().expect("second").expect("ok").1, Value::Str("hello1!"));
            assert!(iter.next().is_none());
        }
    }
}
|
|
@ -0,0 +1,46 @@
|
|||
// Copyright 2018-2019 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
mod encodables;
|
||||
mod primitives;
|
||||
|
||||
use std::marker::PhantomData;
|
||||
|
||||
use crate::error::DataError;
|
||||
|
||||
pub use encodables::*;
|
||||
pub use primitives::*;
|
||||
|
||||
/// A typed wrapper around the serialized bytes of a store key.
///
/// The `PhantomData` records which logical key type `K` produced the bytes,
/// so keys encoded from one type cannot be mistaken for another at compile
/// time even though only raw bytes are stored.
pub(crate) struct Key<K> {
    bytes: Vec<u8>,
    phantom: PhantomData<K>,
}

impl<K> AsRef<[u8]> for Key<K>
where
    K: EncodableKey,
{
    // Exposes the encoded bytes for backend APIs that accept `AsRef<[u8]>`.
    fn as_ref(&self) -> &[u8] {
        self.bytes.as_ref()
    }
}

impl<K> Key<K>
where
    K: EncodableKey,
{
    // `new` returns `Result<Key<K>, _>` rather than `Self` because encoding
    // the key can fail; the clippy lint about that convention is suppressed.
    #[allow(clippy::new_ret_no_self)]
    pub fn new(k: &K) -> Result<Key<K>, DataError> {
        Ok(Key {
            bytes: k.to_bytes()?,
            phantom: PhantomData,
        })
    }
}
|
|
@ -0,0 +1,27 @@
|
|||
// Copyright 2018-2019 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
use bincode::serialize;
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::error::DataError;
|
||||
|
||||
pub trait EncodableKey {
|
||||
fn to_bytes(&self) -> Result<Vec<u8>, DataError>;
|
||||
}
|
||||
|
||||
impl<T> EncodableKey for T
|
||||
where
|
||||
T: Serialize,
|
||||
{
|
||||
fn to_bytes(&self) -> Result<Vec<u8>, DataError> {
|
||||
serialize(self).map_err(|e| e.into())
|
||||
}
|
||||
}
|
|
@ -0,0 +1,15 @@
|
|||
// Copyright 2018-2019 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
use crate::store::keys::EncodableKey;
|
||||
|
||||
/// Marker trait for primitive integer types that may serve as keys of an
/// integer-keyed store.
pub trait PrimitiveInt: EncodableKey {}

// Currently only `u32` keys are supported.
impl PrimitiveInt for u32 {}
|
|
@ -0,0 +1,140 @@
|
|||
// Copyright 2018 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
use std::marker::PhantomData;
|
||||
|
||||
use crate::{
|
||||
backend::{
|
||||
BackendDatabase,
|
||||
BackendFlags,
|
||||
BackendIter,
|
||||
BackendRoCursor,
|
||||
BackendRwTransaction,
|
||||
},
|
||||
error::StoreError,
|
||||
helpers::read_transform,
|
||||
readwrite::{
|
||||
Readable,
|
||||
Writer,
|
||||
},
|
||||
value::Value,
|
||||
};
|
||||
|
||||
// Shorthand for operations that succeed with no payload.
type EmptyResult = Result<(), StoreError>;

/// A store that may hold multiple values per key (LMDB "dupsort" semantics).
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
pub struct MultiStore<D> {
    db: D,
}

/// Iterator over `(key, value)` pairs produced by a store cursor.
// The `PhantomData` ties the iterator's output to the reader lifetime `'i`.
pub struct Iter<'i, I> {
    iter: I,
    phantom: PhantomData<&'i ()>,
}
|
||||
|
||||
impl<D> MultiStore<D>
where
    D: BackendDatabase,
{
    /// Wraps an already-opened backend database handle.
    pub(crate) fn new(db: D) -> MultiStore<D> {
        MultiStore {
            db,
        }
    }

    /// Provides a cursor to all of the values for the duplicate entries that match this
    /// key
    pub fn get<'r, R, I, C, K>(&self, reader: &'r R, k: K) -> Result<Iter<'r, I>, StoreError>
    where
        R: Readable<'r, Database = D, RoCursor = C>,
        I: BackendIter<'r>,
        C: BackendRoCursor<'r, Iter = I>,
        K: AsRef<[u8]> + 'r,
    {
        let cursor = reader.open_ro_cursor(&self.db)?;
        let iter = cursor.into_iter_dup_of(k);

        Ok(Iter {
            iter,
            phantom: PhantomData,
        })
    }

    /// Provides the first value that matches this key
    pub fn get_first<'r, R, K>(&self, reader: &'r R, k: K) -> Result<Option<Value<'r>>, StoreError>
    where
        R: Readable<'r, Database = D>,
        K: AsRef<[u8]>,
    {
        reader.get(&self.db, &k)
    }

    /// Insert a value at the specified key.
    /// This put will allow duplicate entries. If you wish to have duplicate entries
    /// rejected, use the `put_with_flags` function and specify NO_DUP_DATA
    pub fn put<T, K>(&self, writer: &mut Writer<T>, k: K, v: &Value) -> EmptyResult
    where
        T: BackendRwTransaction<Database = D>,
        K: AsRef<[u8]>,
    {
        writer.put(&self.db, &k, v, T::Flags::empty())
    }

    /// Inserts a value at the specified key with explicit backend write flags.
    pub fn put_with_flags<T, K>(&self, writer: &mut Writer<T>, k: K, v: &Value, flags: T::Flags) -> EmptyResult
    where
        T: BackendRwTransaction<Database = D>,
        K: AsRef<[u8]>,
    {
        writer.put(&self.db, &k, v, flags)
    }

    /// Deletes every value stored under `k` (`None` means "all duplicates").
    pub fn delete_all<T, K>(&self, writer: &mut Writer<T>, k: K) -> EmptyResult
    where
        T: BackendRwTransaction<Database = D>,
        K: AsRef<[u8]>,
    {
        writer.delete(&self.db, &k, None)
    }

    /// Deletes the single `(k, v)` pair, leaving other duplicates of `k` intact.
    pub fn delete<T, K>(&self, writer: &mut Writer<T>, k: K, v: &Value) -> EmptyResult
    where
        T: BackendRwTransaction<Database = D>,
        K: AsRef<[u8]>,
    {
        writer.delete(&self.db, &k, Some(&v.to_bytes()?))
    }

    /// Removes every entry in the store.
    pub fn clear<T>(&self, writer: &mut Writer<T>) -> EmptyResult
    where
        T: BackendRwTransaction<Database = D>,
    {
        writer.clear(&self.db)
    }
}
|
||||
|
||||
impl<'i, I> Iterator for Iter<'i, I>
|
||||
where
|
||||
I: BackendIter<'i>,
|
||||
{
|
||||
type Item = Result<(&'i [u8], Value<'i>), StoreError>;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
match self.iter.next() {
|
||||
None => None,
|
||||
Some(Ok((key, bytes))) => {
|
||||
match read_transform(Ok(bytes)) {
|
||||
Ok(val) => Some(Ok((key, val))),
|
||||
Err(err) => Some(Err(err)),
|
||||
}
|
||||
},
|
||||
Some(Err(err)) => Some(Err(err.into())),
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,145 @@
|
|||
// Copyright 2018 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
use std::marker::PhantomData;
|
||||
|
||||
use crate::{
|
||||
backend::{
|
||||
BackendDatabase,
|
||||
BackendFlags,
|
||||
BackendIter,
|
||||
BackendRoCursor,
|
||||
BackendRwTransaction,
|
||||
},
|
||||
error::StoreError,
|
||||
helpers::read_transform,
|
||||
readwrite::{
|
||||
Readable,
|
||||
Writer,
|
||||
},
|
||||
value::Value,
|
||||
};
|
||||
|
||||
// Shorthand for operations that succeed with no payload.
type EmptyResult = Result<(), StoreError>;

/// A store holding at most one value per key.
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
pub struct SingleStore<D> {
    db: D,
}

/// Iterator over `(key, value)` pairs produced by a store cursor.
// The `PhantomData` ties the iterator's output to the reader lifetime `'i`.
pub struct Iter<'i, I> {
    iter: I,
    phantom: PhantomData<&'i ()>,
}
|
||||
|
||||
impl<D> SingleStore<D>
|
||||
where
|
||||
D: BackendDatabase,
|
||||
{
|
||||
pub(crate) fn new(db: D) -> SingleStore<D> {
|
||||
SingleStore {
|
||||
db,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get<'r, R, K>(&self, reader: &'r R, k: K) -> Result<Option<Value<'r>>, StoreError>
|
||||
where
|
||||
R: Readable<'r, Database = D>,
|
||||
K: AsRef<[u8]>,
|
||||
{
|
||||
reader.get(&self.db, &k)
|
||||
}
|
||||
|
||||
// TODO: flags
|
||||
pub fn put<T, K>(&self, writer: &mut Writer<T>, k: K, v: &Value) -> EmptyResult
|
||||
where
|
||||
T: BackendRwTransaction<Database = D>,
|
||||
K: AsRef<[u8]>,
|
||||
{
|
||||
writer.put(&self.db, &k, v, T::Flags::empty())
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "db-dup-sort"))]
|
||||
pub fn delete<T, K>(&self, writer: &mut Writer<T>, k: K) -> EmptyResult
|
||||
where
|
||||
T: BackendRwTransaction<Database = D>,
|
||||
K: AsRef<[u8]>,
|
||||
{
|
||||
writer.delete(&self.db, &k)
|
||||
}
|
||||
|
||||
#[cfg(feature = "db-dup-sort")]
|
||||
pub fn delete<T, K>(&self, writer: &mut Writer<T>, k: K) -> EmptyResult
|
||||
where
|
||||
T: BackendRwTransaction<Database = D>,
|
||||
K: AsRef<[u8]>,
|
||||
{
|
||||
writer.delete(&self.db, &k, None)
|
||||
}
|
||||
|
||||
pub fn iter_start<'r, R, I, C>(&self, reader: &'r R) -> Result<Iter<'r, I>, StoreError>
|
||||
where
|
||||
R: Readable<'r, Database = D, RoCursor = C>,
|
||||
I: BackendIter<'r>,
|
||||
C: BackendRoCursor<'r, Iter = I>,
|
||||
{
|
||||
let cursor = reader.open_ro_cursor(&self.db)?;
|
||||
let iter = cursor.into_iter();
|
||||
|
||||
Ok(Iter {
|
||||
iter,
|
||||
phantom: PhantomData,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn iter_from<'r, R, I, C, K>(&self, reader: &'r R, k: K) -> Result<Iter<'r, I>, StoreError>
|
||||
where
|
||||
R: Readable<'r, Database = D, RoCursor = C>,
|
||||
I: BackendIter<'r>,
|
||||
C: BackendRoCursor<'r, Iter = I>,
|
||||
K: AsRef<[u8]> + 'r,
|
||||
{
|
||||
let cursor = reader.open_ro_cursor(&self.db)?;
|
||||
let iter = cursor.into_iter_from(k);
|
||||
|
||||
Ok(Iter {
|
||||
iter,
|
||||
phantom: PhantomData,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn clear<T>(&self, writer: &mut Writer<T>) -> EmptyResult
|
||||
where
|
||||
D: BackendDatabase,
|
||||
T: BackendRwTransaction<Database = D>,
|
||||
{
|
||||
writer.clear(&self.db)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'i, I> Iterator for Iter<'i, I>
|
||||
where
|
||||
I: BackendIter<'i>,
|
||||
{
|
||||
type Item = Result<(&'i [u8], Value<'i>), StoreError>;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
match self.iter.next() {
|
||||
None => None,
|
||||
Some(Ok((key, bytes))) => {
|
||||
match read_transform(Ok(bytes)) {
|
||||
Ok(val) => Some(Ok((key, val))),
|
||||
Err(err) => Some(Err(err)),
|
||||
}
|
||||
},
|
||||
Some(Err(err)) => Some(Err(err.into())),
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,256 @@
|
|||
// Copyright 2018-2019 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
use std::fmt;
|
||||
|
||||
use arrayref::array_ref;
|
||||
use bincode::{
|
||||
deserialize,
|
||||
serialize,
|
||||
serialized_size,
|
||||
};
|
||||
use ordered_float::OrderedFloat;
|
||||
use uuid::{
|
||||
Bytes,
|
||||
Uuid,
|
||||
};
|
||||
|
||||
use crate::error::DataError;
|
||||
|
||||
/// We define a set of types, associated with simple integers, to annotate values stored
/// in LMDB. This is to avoid an accidental 'cast' from a value of one type to another.
/// For this reason we don't simply use `deserialize` from the `bincode` crate.
// These tag values are persisted on disk as the first byte of every stored
// value; never renumber existing variants.
#[repr(u8)]
#[derive(Debug, PartialEq, Eq)]
pub enum Type {
    Bool = 1,
    U64 = 2,
    I64 = 3,
    F64 = 4,
    Instant = 5, // Millisecond-precision timestamp.
    Uuid = 6,
    Str = 7,
    Json = 8,
    Blob = 9,
}
|
||||
|
||||
/// We use manual tagging, because <https://github.com/serde-rs/serde/issues/610>.
|
||||
impl Type {
|
||||
pub fn from_tag(tag: u8) -> Result<Type, DataError> {
|
||||
Type::from_primitive(tag).ok_or_else(|| DataError::UnknownType(tag))
|
||||
}
|
||||
|
||||
#[allow(clippy::wrong_self_convention)]
|
||||
pub fn to_tag(self) -> u8 {
|
||||
self as u8
|
||||
}
|
||||
|
||||
fn from_primitive(p: u8) -> Option<Type> {
|
||||
match p {
|
||||
1 => Some(Type::Bool),
|
||||
2 => Some(Type::U64),
|
||||
3 => Some(Type::I64),
|
||||
4 => Some(Type::F64),
|
||||
5 => Some(Type::Instant),
|
||||
6 => Some(Type::Uuid),
|
||||
7 => Some(Type::Str),
|
||||
8 => Some(Type::Json),
|
||||
9 => Some(Type::Blob),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for Type {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
|
||||
f.write_str(match *self {
|
||||
Type::Bool => "bool",
|
||||
Type::U64 => "u64",
|
||||
Type::I64 => "i64",
|
||||
Type::F64 => "f64",
|
||||
Type::Instant => "instant",
|
||||
Type::Uuid => "uuid",
|
||||
Type::Str => "str",
|
||||
Type::Json => "json",
|
||||
Type::Blob => "blob",
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// A typed value decoded from a store entry. Payload-carrying variants
/// borrow their data for lifetime `'v` (from the database page or buffer
/// they were read from).
#[derive(Debug, Eq, PartialEq)]
pub enum Value<'v> {
    Bool(bool),
    U64(u64),
    I64(i64),
    // `OrderedFloat` wrapper lets the enum derive `Eq`.
    F64(OrderedFloat<f64>),
    Instant(i64), // Millisecond-precision timestamp.
    Uuid(&'v Bytes),
    Str(&'v str),
    Json(&'v str),
    Blob(&'v [u8]),
}

/// An owned counterpart to [`Value`], for callers that need data to outlive
/// the read transaction. Convertible in both directions via `From`.
#[derive(Clone, Debug, PartialEq)]
pub enum OwnedValue {
    Bool(bool),
    U64(u64),
    I64(i64),
    F64(f64),
    Instant(i64), // Millisecond-precision timestamp.
    Uuid(Uuid),
    Str(String),
    Json(String), // TODO
    Blob(Vec<u8>),
}
|
||||
|
||||
fn uuid(bytes: &[u8]) -> Result<Value, DataError> {
|
||||
if bytes.len() == 16 {
|
||||
Ok(Value::Uuid(array_ref![bytes, 0, 16]))
|
||||
} else {
|
||||
Err(DataError::InvalidUuid)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'v> Value<'v> {
    /// Decodes a tagged byte slice — one tag byte (see `Type`) followed by
    /// the bincode-encoded payload — into a typed `Value`.
    pub fn from_tagged_slice(slice: &'v [u8]) -> Result<Value<'v>, DataError> {
        let (tag, data) = slice.split_first().ok_or(DataError::Empty)?;
        let t = Type::from_tag(*tag)?;
        Value::from_type_and_data(t, data)
    }

    // Decodes `data` according to the already-parsed type tag `t`.
    fn from_type_and_data(t: Type, data: &'v [u8]) -> Result<Value<'v>, DataError> {
        // Uuid is handled up front: its payload first deserializes to a byte
        // slice, then `uuid` validates the 16-byte length — a two-step shape
        // the shared match below can't express.
        if t == Type::Uuid {
            return deserialize(data)
                .map_err(|e| {
                    DataError::DecodingError {
                        value_type: t,
                        err: e,
                    }
                })
                .map(uuid)?;
        }

        match t {
            Type::Bool => deserialize(data).map(Value::Bool),
            Type::U64 => deserialize(data).map(Value::U64),
            Type::I64 => deserialize(data).map(Value::I64),
            Type::F64 => deserialize(data).map(OrderedFloat).map(Value::F64),
            Type::Instant => deserialize(data).map(Value::Instant),
            Type::Str => deserialize(data).map(Value::Str),
            Type::Json => deserialize(data).map(Value::Json),
            Type::Blob => deserialize(data).map(Value::Blob),
            Type::Uuid => {
                // Processed above to avoid verbose duplication of error transforms.
                unreachable!()
            },
        }
        .map_err(|e| {
            DataError::DecodingError {
                value_type: t,
                err: e,
            }
        })
    }

    /// Serializes this value to its tagged on-disk byte representation
    /// (tag byte + bincode payload).
    pub fn to_bytes(&self) -> Result<Vec<u8>, DataError> {
        match self {
            Value::Bool(v) => serialize(&(Type::Bool.to_tag(), *v)),
            Value::U64(v) => serialize(&(Type::U64.to_tag(), *v)),
            Value::I64(v) => serialize(&(Type::I64.to_tag(), *v)),
            Value::F64(v) => serialize(&(Type::F64.to_tag(), v.0)),
            Value::Instant(v) => serialize(&(Type::Instant.to_tag(), *v)),
            Value::Str(v) => serialize(&(Type::Str.to_tag(), v)),
            Value::Json(v) => serialize(&(Type::Json.to_tag(), v)),
            Value::Blob(v) => serialize(&(Type::Blob.to_tag(), v)),
            Value::Uuid(v) => serialize(&(Type::Uuid.to_tag(), v)),
        }
        .map_err(DataError::EncodingError)
    }

    /// Returns the number of bytes `to_bytes` would produce, without
    /// allocating the buffer.
    pub fn serialized_size(&self) -> Result<u64, DataError> {
        match self {
            Value::Bool(v) => serialized_size(&(Type::Bool.to_tag(), *v)),
            Value::U64(v) => serialized_size(&(Type::U64.to_tag(), *v)),
            Value::I64(v) => serialized_size(&(Type::I64.to_tag(), *v)),
            Value::F64(v) => serialized_size(&(Type::F64.to_tag(), v.0)),
            Value::Instant(v) => serialized_size(&(Type::Instant.to_tag(), *v)),
            Value::Str(v) => serialized_size(&(Type::Str.to_tag(), v)),
            Value::Json(v) => serialized_size(&(Type::Json.to_tag(), v)),
            Value::Blob(v) => serialized_size(&(Type::Blob.to_tag(), v)),
            Value::Uuid(v) => serialized_size(&(Type::Uuid.to_tag(), v)),
        }
        .map_err(DataError::EncodingError)
    }
}
|
||||
|
||||
// Copies a borrowed `Value` into an independently-owned `OwnedValue`,
// cloning the payload out of the backing storage.
impl<'v> From<&'v Value<'v>> for OwnedValue {
    fn from(value: &Value) -> OwnedValue {
        match value {
            Value::Bool(v) => OwnedValue::Bool(*v),
            Value::U64(v) => OwnedValue::U64(*v),
            Value::I64(v) => OwnedValue::I64(*v),
            // `**v` unwraps the `OrderedFloat` down to the raw f64.
            Value::F64(v) => OwnedValue::F64(**v),
            Value::Instant(v) => OwnedValue::Instant(*v),
            Value::Uuid(v) => OwnedValue::Uuid(Uuid::from_bytes(**v)),
            Value::Str(v) => OwnedValue::Str((*v).to_string()),
            Value::Json(v) => OwnedValue::Json((*v).to_string()),
            Value::Blob(v) => OwnedValue::Blob(v.to_vec()),
        }
    }
}

// Borrows an `OwnedValue` back as a `Value` without copying the payload.
impl<'v> From<&'v OwnedValue> for Value<'v> {
    fn from(value: &OwnedValue) -> Value {
        match value {
            OwnedValue::Bool(v) => Value::Bool(*v),
            OwnedValue::U64(v) => Value::U64(*v),
            OwnedValue::I64(v) => Value::I64(*v),
            OwnedValue::F64(v) => Value::F64(OrderedFloat::from(*v)),
            OwnedValue::Instant(v) => Value::Instant(*v),
            OwnedValue::Uuid(v) => Value::Uuid(v.as_bytes()),
            OwnedValue::Str(v) => Value::Str(v),
            OwnedValue::Json(v) => Value::Json(v),
            OwnedValue::Blob(v) => Value::Blob(v),
        }
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // Pins the on-disk size of each tagged value encoding; a change here
    // would indicate a (breaking) change to the serialization format.
    #[test]
    fn test_value_serialized_size() {
        // | Value enum    | tag: 1 byte | value_payload            |
        // |----------------------------------------------------------|
        // | I64           |     1       |            8             |
        // | U64           |     1       |            8             |
        // | Bool          |     1       |            1             |
        // | Instant       |     1       |            8             |
        // | F64           |     1       |            8             |
        // | Uuid          |     1       |            16            |
        // | Str/Blob/Json |     1       |(8: len + sizeof(payload))|
        assert_eq!(Value::I64(-1000).serialized_size().unwrap(), 9);
        assert_eq!(Value::U64(1000u64).serialized_size().unwrap(), 9);
        assert_eq!(Value::Bool(true).serialized_size().unwrap(), 2);
        assert_eq!(Value::Instant(1_558_020_865_224).serialized_size().unwrap(), 9);
        assert_eq!(Value::F64(OrderedFloat(10000.1)).serialized_size().unwrap(), 9);
        assert_eq!(Value::Str("hello!").serialized_size().unwrap(), 15);
        // "¡Hola" is 5 chars but 6 UTF-8 bytes: same total as "hello!".
        assert_eq!(Value::Str("¡Hola").serialized_size().unwrap(), 15);
        assert_eq!(Value::Blob(b"hello!").serialized_size().unwrap(), 15);
        assert_eq!(
            uuid(b"\x9f\xe2\xc4\xe9\x3f\x65\x4f\xdb\xb2\x4c\x02\xb1\x52\x59\x71\x6c")
                .unwrap()
                .serialized_size()
                .unwrap(),
            17
        );
    }
}
|
|
@ -0,0 +1,187 @@
|
|||
// Copyright 2018-2019 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
use std::fs;
|
||||
|
||||
use tempfile::Builder;
|
||||
|
||||
use rkv::{
|
||||
backend::{
|
||||
Lmdb,
|
||||
SafeMode,
|
||||
},
|
||||
Rkv,
|
||||
StoreOptions,
|
||||
Value,
|
||||
};
|
||||
|
||||
// Checks that a SafeMode database can live in the same directory as an LMDB
// database: each backend keeps its own on-disk files, so data written by one
// is neither visible to nor clobbered by the other.
#[test]
fn test_open_safe_same_dir_as_lmdb() {
    let root = Builder::new().prefix("test_open_safe_same_dir_as_lmdb").tempdir().expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");

    // Create database of type A and save to disk.
    {
        let k = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
        let sk = k.open_single("sk", StoreOptions::create()).expect("opened");

        let mut writer = k.write().expect("writer");
        sk.put(&mut writer, "foo", &Value::I64(1234)).expect("wrote");
        sk.put(&mut writer, "bar", &Value::Bool(true)).expect("wrote");
        sk.put(&mut writer, "baz", &Value::Str("héllo, yöu")).expect("wrote");
        assert_eq!(sk.get(&writer, "foo").expect("read"), Some(Value::I64(1234)));
        assert_eq!(sk.get(&writer, "bar").expect("read"), Some(Value::Bool(true)));
        assert_eq!(sk.get(&writer, "baz").expect("read"), Some(Value::Str("héllo, yöu")));
        writer.commit().expect("committed");
        k.sync(true).expect("synced");
    }
    // Verify that database of type A was written to disk.
    {
        let k = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
        let sk = k.open_single("sk", StoreOptions::default()).expect("opened");

        let reader = k.read().expect("reader");
        assert_eq!(sk.get(&reader, "foo").expect("read"), Some(Value::I64(1234)));
        assert_eq!(sk.get(&reader, "bar").expect("read"), Some(Value::Bool(true)));
        assert_eq!(sk.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu")));
    }
    // Create database of type B and verify that it is empty.
    {
        let k = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
        // No SafeMode file exists yet, so opening without `create` must fail.
        let _ = k.open_single("sk", StoreOptions::default()).expect_err("not opened");
    }
    // Verify that database of type A wasn't changed.
    {
        let k = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
        let sk = k.open_single("sk", StoreOptions::default()).expect("opened");

        let reader = k.read().expect("reader");
        assert_eq!(sk.get(&reader, "foo").expect("read"), Some(Value::I64(1234)));
        assert_eq!(sk.get(&reader, "bar").expect("read"), Some(Value::Bool(true)));
        assert_eq!(sk.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu")));
    }
    // Create database of type B and save to disk (type A exists at the same path).
    {
        let k = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
        let sk = k.open_single("sk", StoreOptions::create()).expect("opened");

        let mut writer = k.write().expect("writer");
        sk.put(&mut writer, "foo1", &Value::I64(5678)).expect("wrote");
        sk.put(&mut writer, "bar1", &Value::Bool(false)).expect("wrote");
        sk.put(&mut writer, "baz1", &Value::Str("héllo~ yöu")).expect("wrote");
        assert_eq!(sk.get(&writer, "foo1").expect("read"), Some(Value::I64(5678)));
        assert_eq!(sk.get(&writer, "bar1").expect("read"), Some(Value::Bool(false)));
        assert_eq!(sk.get(&writer, "baz1").expect("read"), Some(Value::Str("héllo~ yöu")));
        writer.commit().expect("committed");
        k.sync(true).expect("synced");
    }
    // Verify that database of type B was written to disk.
    {
        let k = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
        let sk = k.open_single("sk", StoreOptions::default()).expect("opened");

        let reader = k.read().expect("reader");
        assert_eq!(sk.get(&reader, "foo1").expect("read"), Some(Value::I64(5678)));
        assert_eq!(sk.get(&reader, "bar1").expect("read"), Some(Value::Bool(false)));
        assert_eq!(sk.get(&reader, "baz1").expect("read"), Some(Value::Str("héllo~ yöu")));
    }
    // Verify that database of type A still wasn't changed.
    {
        let k = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
        let sk = k.open_single("sk", StoreOptions::default()).expect("opened");

        let reader = k.read().expect("reader");
        assert_eq!(sk.get(&reader, "foo").expect("read"), Some(Value::I64(1234)));
        assert_eq!(sk.get(&reader, "bar").expect("read"), Some(Value::Bool(true)));
        assert_eq!(sk.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu")));
    }
}
|
||||
|
||||
/// Verify that LMDB and SafeMode environments can coexist in the same
/// directory without corrupting each other: a store created by one backend
/// is invisible to the other, and writes through one backend never alter
/// the other backend's on-disk data.
#[test]
fn test_open_lmdb_same_dir_as_safe() {
    let root = Builder::new().prefix("test_open_lmdb_same_dir_as_safe").tempdir().expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");

    // Create database of type A (SafeMode) and save to disk.
    {
        let k = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
        let sk = k.open_single("sk", StoreOptions::create()).expect("opened");

        let mut writer = k.write().expect("writer");
        sk.put(&mut writer, "foo", &Value::I64(1234)).expect("wrote");
        sk.put(&mut writer, "bar", &Value::Bool(true)).expect("wrote");
        sk.put(&mut writer, "baz", &Value::Str("héllo, yöu")).expect("wrote");
        // The uncommitted writer must already see its own pending values.
        assert_eq!(sk.get(&writer, "foo").expect("read"), Some(Value::I64(1234)));
        assert_eq!(sk.get(&writer, "bar").expect("read"), Some(Value::Bool(true)));
        assert_eq!(sk.get(&writer, "baz").expect("read"), Some(Value::Str("héllo, yöu")));
        writer.commit().expect("committed");
        k.sync(true).expect("synced");
    }
    // Verify that database of type A was written to disk.
    {
        let k = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
        let sk = k.open_single("sk", StoreOptions::default()).expect("opened");

        let reader = k.read().expect("reader");
        assert_eq!(sk.get(&reader, "foo").expect("read"), Some(Value::I64(1234)));
        assert_eq!(sk.get(&reader, "bar").expect("read"), Some(Value::Bool(true)));
        assert_eq!(sk.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu")));
    }
    // Create database of type B (LMDB) and verify that it is empty: opening
    // without StoreOptions::create() must fail because the store does not
    // exist in this backend's files.
    {
        let k = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
        let _ = k.open_single("sk", StoreOptions::default()).expect_err("not opened");
    }
    // Verify that database of type A wasn't changed.
    {
        let k = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
        let sk = k.open_single("sk", StoreOptions::default()).expect("opened");

        let reader = k.read().expect("reader");
        assert_eq!(sk.get(&reader, "foo").expect("read"), Some(Value::I64(1234)));
        assert_eq!(sk.get(&reader, "bar").expect("read"), Some(Value::Bool(true)));
        assert_eq!(sk.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu")));
    }
    // Create database of type B and save to disk (type A exists at the same path).
    // Distinct keys/values ("foo1" etc.) keep the two data sets distinguishable.
    {
        let k = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
        let sk = k.open_single("sk", StoreOptions::create()).expect("opened");

        let mut writer = k.write().expect("writer");
        sk.put(&mut writer, "foo1", &Value::I64(5678)).expect("wrote");
        sk.put(&mut writer, "bar1", &Value::Bool(false)).expect("wrote");
        sk.put(&mut writer, "baz1", &Value::Str("héllo~ yöu")).expect("wrote");
        assert_eq!(sk.get(&writer, "foo1").expect("read"), Some(Value::I64(5678)));
        assert_eq!(sk.get(&writer, "bar1").expect("read"), Some(Value::Bool(false)));
        assert_eq!(sk.get(&writer, "baz1").expect("read"), Some(Value::Str("héllo~ yöu")));
        writer.commit().expect("committed");
        k.sync(true).expect("synced");
    }
    // Verify that database of type B was written to disk.
    {
        let k = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
        let sk = k.open_single("sk", StoreOptions::default()).expect("opened");

        let reader = k.read().expect("reader");
        assert_eq!(sk.get(&reader, "foo1").expect("read"), Some(Value::I64(5678)));
        assert_eq!(sk.get(&reader, "bar1").expect("read"), Some(Value::Bool(false)));
        assert_eq!(sk.get(&reader, "baz1").expect("read"), Some(Value::Str("héllo~ yöu")));
    }
    // Verify that database of type A still wasn't changed.
    {
        let k = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
        let sk = k.open_single("sk", StoreOptions::default()).expect("opened");

        let reader = k.read().expect("reader");
        assert_eq!(sk.get(&reader, "foo").expect("read"), Some(Value::I64(1234)));
        assert_eq!(sk.get(&reader, "bar").expect("read"), Some(Value::Bool(true)));
        assert_eq!(sk.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu")));
    }
}
|
Diff between files not shown because of its large size
Load diff
|
@ -0,0 +1,356 @@
|
|||
// Copyright 2018-2019 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
use std::{
|
||||
fs,
|
||||
path::Path,
|
||||
};
|
||||
|
||||
use tempfile::Builder;
|
||||
|
||||
use rkv::{
|
||||
backend::{
|
||||
Lmdb,
|
||||
SafeMode,
|
||||
},
|
||||
Migrator,
|
||||
Rkv,
|
||||
StoreOptions,
|
||||
Value,
|
||||
};
|
||||
|
||||
// Populates a freshly created environment with a fixed data set
// ("foo"/"bar"/"baz") under a store named "store" and commits it.
// Implemented as a macro (rather than a generic fn) so the same body works
// for both the LMDB and SafeMode environment types used in these tests.
macro_rules! populate_store {
    ($env:expr) => {
        let store = $env.open_single("store", StoreOptions::create()).expect("opened");
        let mut writer = $env.write().expect("writer");
        store.put(&mut writer, "foo", &Value::I64(1234)).expect("wrote");
        store.put(&mut writer, "bar", &Value::Bool(true)).expect("wrote");
        store.put(&mut writer, "baz", &Value::Str("héllo, yöu")).expect("wrote");
        writer.commit().expect("committed");
    };
}
|
||||
|
||||
/// End-to-end migration from an LMDB environment to a SafeMode environment
/// at the same path: the data must be copied into the destination and the
/// old LMDB files (data.mdb / lock.mdb) must be deleted afterwards.
#[test]
fn test_simple_migrator_lmdb_to_safe() {
    let root = Builder::new().prefix("test_simple_migrator_lmdb_to_safe").tempdir().expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");

    // Populate source environment and persist to disk.
    {
        let src_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
        populate_store!(&src_env);
        src_env.sync(true).expect("synced");
    }
    // Check if the files were written to disk.
    {
        let mut datamdb = root.path().to_path_buf();
        let mut lockmdb = root.path().to_path_buf();
        datamdb.push("data.mdb");
        lockmdb.push("lock.mdb");
        assert!(datamdb.exists());
        assert!(lockmdb.exists());
    }
    // Verify that database was written to disk.
    {
        let src_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
        let store = src_env.open_single("store", StoreOptions::default()).expect("opened");
        let reader = src_env.read().expect("reader");
        assert_eq!(store.get(&reader, "foo").expect("read"), Some(Value::I64(1234)));
        assert_eq!(store.get(&reader, "bar").expect("read"), Some(Value::Bool(true)));
        assert_eq!(store.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu")));
    }
    // Open and migrate. The identity closure leaves the LMDB builder at its
    // default configuration.
    {
        let dst_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
        Migrator::open_and_migrate_lmdb_to_safe_mode(root.path(), |builder| builder, &dst_env).expect("migrated");
    }
    // Verify that the database was indeed migrated.
    {
        let dst_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
        let store = dst_env.open_single("store", StoreOptions::default()).expect("opened");
        let reader = dst_env.read().expect("reader");
        assert_eq!(store.get(&reader, "foo").expect("read"), Some(Value::I64(1234)));
        assert_eq!(store.get(&reader, "bar").expect("read"), Some(Value::Bool(true)));
        assert_eq!(store.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu")));
    }
    // Check if the old files were deleted from disk.
    {
        let mut datamdb = root.path().to_path_buf();
        let mut lockmdb = root.path().to_path_buf();
        datamdb.push("data.mdb");
        lockmdb.push("lock.mdb");
        assert!(!datamdb.exists());
        assert!(!lockmdb.exists());
    }
}
|
||||
|
||||
/// End-to-end migration in the opposite direction (SafeMode -> LMDB): the
/// data must be copied into the destination and the old SafeMode file
/// (data.safe.bin) must be deleted afterwards.
#[test]
fn test_simple_migrator_safe_to_lmdb() {
    let root = Builder::new().prefix("test_simple_migrator_safe_to_lmdb").tempdir().expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");

    // Populate source environment and persist to disk.
    {
        let src_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
        populate_store!(&src_env);
        src_env.sync(true).expect("synced");
    }
    // Check if the files were written to disk.
    {
        let mut safebin = root.path().to_path_buf();
        safebin.push("data.safe.bin");
        assert!(safebin.exists());
    }
    // Verify that database was written to disk.
    {
        let src_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
        let store = src_env.open_single("store", StoreOptions::default()).expect("opened");
        let reader = src_env.read().expect("reader");
        assert_eq!(store.get(&reader, "foo").expect("read"), Some(Value::I64(1234)));
        assert_eq!(store.get(&reader, "bar").expect("read"), Some(Value::Bool(true)));
        assert_eq!(store.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu")));
    }
    // Open and migrate. The identity closure leaves the builder at its
    // default configuration.
    {
        let dst_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
        Migrator::open_and_migrate_safe_mode_to_lmdb(root.path(), |builder| builder, &dst_env).expect("migrated");
    }
    // Verify that the database was indeed migrated.
    {
        let dst_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
        let store = dst_env.open_single("store", StoreOptions::default()).expect("opened");
        let reader = dst_env.read().expect("reader");
        assert_eq!(store.get(&reader, "foo").expect("read"), Some(Value::I64(1234)));
        assert_eq!(store.get(&reader, "bar").expect("read"), Some(Value::Bool(true)));
        assert_eq!(store.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu")));
    }
    // Check if the old files were deleted from disk.
    {
        let mut safebin = root.path().to_path_buf();
        safebin.push("data.safe.bin");
        assert!(!safebin.exists());
    }
}
|
||||
|
||||
/// Round-trip migration: LMDB -> SafeMode -> LMDB. The data must survive
/// both hops, and at the end only the LMDB files (data.mdb / lock.mdb)
/// remain on disk — the intermediate SafeMode file must be gone.
#[test]
fn test_migrator_round_trip() {
    // Use this test's own name as the tempdir prefix (it previously reused
    // "test_simple_migrator_lmdb_to_safe" via copy-paste, which made temp
    // dirs unattributable when debugging).
    let root = Builder::new().prefix("test_migrator_round_trip").tempdir().expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");

    // Populate source environment and persist to disk.
    {
        let src_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
        populate_store!(&src_env);
        src_env.sync(true).expect("synced");
    }
    // Open and migrate.
    {
        let dst_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
        Migrator::open_and_migrate_lmdb_to_safe_mode(root.path(), |builder| builder, &dst_env).expect("migrated");
    }
    // Open and migrate back.
    {
        let dst_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
        Migrator::open_and_migrate_safe_mode_to_lmdb(root.path(), |builder| builder, &dst_env).expect("migrated");
    }
    // Verify that the database was indeed migrated twice.
    {
        let dst_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
        let store = dst_env.open_single("store", StoreOptions::default()).expect("opened");
        let reader = dst_env.read().expect("reader");
        assert_eq!(store.get(&reader, "foo").expect("read"), Some(Value::I64(1234)));
        assert_eq!(store.get(&reader, "bar").expect("read"), Some(Value::Bool(true)));
        assert_eq!(store.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu")));
    }
    // Check if the right files are finally present on disk.
    {
        let mut datamdb = root.path().to_path_buf();
        let mut lockmdb = root.path().to_path_buf();
        let mut safebin = root.path().to_path_buf();
        datamdb.push("data.mdb");
        lockmdb.push("lock.mdb");
        safebin.push("data.safe.bin");
        assert!(datamdb.exists());
        assert!(lockmdb.exists());
        assert!(!safebin.exists());
    }
}
|
||||
|
||||
/// Easy-mode LMDB->SafeMode migration with a non-existent source path is a
/// silent no-op: no error, and no database files appear in the destination.
#[test]
fn test_migrator_no_dir_1() {
    let root = Builder::new().prefix("test_migrator_no_dir").tempdir().expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");

    // This won't fail with IoError even though the path is a bogus path, because this
    // is the "easy mode" migration which automatically handles (ignores) this error.
    let dst_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
    Migrator::easy_migrate_lmdb_to_safe_mode(Path::new("bogus"), &dst_env).expect("migrated");

    let mut datamdb = root.path().to_path_buf();
    let mut lockmdb = root.path().to_path_buf();
    let mut safebin = root.path().to_path_buf();
    datamdb.push("data.mdb");
    lockmdb.push("lock.mdb");
    safebin.push("data.safe.bin");
    assert!(!datamdb.exists());
    assert!(!lockmdb.exists());
    assert!(!safebin.exists()); // safe mode doesn't write an empty db to disk
}
|
||||
|
||||
/// Easy-mode SafeMode->LMDB migration with a non-existent source path is a
/// silent no-op; only LMDB's own empty-environment files appear on disk.
#[test]
fn test_migrator_no_dir_2() {
    let root = Builder::new().prefix("test_migrator_no_dir").tempdir().expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");

    // This won't fail with IoError even though the path is a bogus path, because this
    // is the "easy mode" migration which automatically handles (ignores) this error.
    let dst_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
    Migrator::easy_migrate_safe_mode_to_lmdb(Path::new("bogus"), &dst_env).expect("migrated");

    let mut datamdb = root.path().to_path_buf();
    let mut lockmdb = root.path().to_path_buf();
    let mut safebin = root.path().to_path_buf();
    datamdb.push("data.mdb");
    lockmdb.push("lock.mdb");
    safebin.push("data.safe.bin");
    assert!(datamdb.exists()); // lmdb writes an empty db to disk
    assert!(lockmdb.exists());
    assert!(!safebin.exists());
}
|
||||
|
||||
/// Easy-mode LMDB->SafeMode migration tolerates a corrupted source file:
/// it neither fails nor deletes the corrupted data.mdb, and produces no
/// SafeMode output.
#[test]
fn test_migrator_invalid_1() {
    let root = Builder::new().prefix("test_migrator_invalid").tempdir().expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");

    // Plant an unparseable "database" where LMDB expects its data file.
    let dbfile = root.path().join("data.mdb");
    fs::write(dbfile, "bogus").expect("dbfile created");

    // This won't fail with FileInvalid even though the database is a bogus file, because this
    // is the "easy mode" migration which automatically handles (ignores) this error.
    let dst_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
    Migrator::easy_migrate_lmdb_to_safe_mode(root.path(), &dst_env).expect("migrated");

    let mut datamdb = root.path().to_path_buf();
    let mut lockmdb = root.path().to_path_buf();
    let mut safebin = root.path().to_path_buf();
    datamdb.push("data.mdb");
    lockmdb.push("lock.mdb");
    safebin.push("data.safe.bin");
    assert!(datamdb.exists()); // corrupted db isn't deleted
    assert!(lockmdb.exists());
    assert!(!safebin.exists());
}
|
||||
|
||||
/// Easy-mode SafeMode->LMDB migration tolerates a corrupted source file:
/// it neither fails nor deletes the corrupted data.safe.bin; LMDB still
/// creates its own (empty) files.
#[test]
fn test_migrator_invalid_2() {
    let root = Builder::new().prefix("test_migrator_invalid").tempdir().expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");

    // Plant an unparseable "database" where SafeMode expects its data file.
    let dbfile = root.path().join("data.safe.bin");
    fs::write(dbfile, "bogus").expect("dbfile created");

    // This won't fail with FileInvalid even though the database is a bogus file, because this
    // is the "easy mode" migration which automatically handles (ignores) this error.
    let dst_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
    Migrator::easy_migrate_safe_mode_to_lmdb(root.path(), &dst_env).expect("migrated");

    let mut datamdb = root.path().to_path_buf();
    let mut lockmdb = root.path().to_path_buf();
    let mut safebin = root.path().to_path_buf();
    datamdb.push("data.mdb");
    lockmdb.push("lock.mdb");
    safebin.push("data.safe.bin");
    assert!(datamdb.exists()); // lmdb writes an empty db to disk
    assert!(lockmdb.exists());
    assert!(safebin.exists()); // corrupted db isn't deleted
}
|
||||
|
||||
/// Strict (non-easy) LMDB->SafeMode migration fails with `SourceEmpty`
/// when the source environment contains no data.
#[test]
#[should_panic(expected = "migrated: SourceEmpty")]
fn test_migrator_lmdb_to_safe_1() {
    let root = Builder::new().prefix("test_migrate_lmdb_to_safe").tempdir().expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");

    let src_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
    let dst_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
    // Source was never populated, so the expect() panics with SourceEmpty.
    Migrator::migrate_lmdb_to_safe_mode(&src_env, &dst_env).expect("migrated");
}
|
||||
|
||||
/// Strict (non-easy) LMDB->SafeMode migration fails with
/// `DestinationNotEmpty` when the destination already contains data.
#[test]
#[should_panic(expected = "migrated: DestinationNotEmpty")]
fn test_migrator_lmdb_to_safe_2() {
    let root = Builder::new().prefix("test_migrate_lmdb_to_safe").tempdir().expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");

    let src_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
    populate_store!(&src_env);
    let dst_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
    // Pre-populating the destination triggers the DestinationNotEmpty panic.
    populate_store!(&dst_env);
    Migrator::migrate_lmdb_to_safe_mode(&src_env, &dst_env).expect("migrated");
}
|
||||
|
||||
/// Strict (non-easy) LMDB->SafeMode migration succeeds when the source is
/// populated and the destination is empty; the migrated values are readable
/// from the destination.
#[test]
fn test_migrator_lmdb_to_safe_3() {
    let root = Builder::new().prefix("test_migrate_lmdb_to_safe").tempdir().expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");

    let src_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
    populate_store!(&src_env);
    let dst_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
    Migrator::migrate_lmdb_to_safe_mode(&src_env, &dst_env).expect("migrated");

    let store = dst_env.open_single("store", StoreOptions::default()).expect("opened");
    let reader = dst_env.read().expect("reader");
    assert_eq!(store.get(&reader, "foo").expect("read"), Some(Value::I64(1234)));
    assert_eq!(store.get(&reader, "bar").expect("read"), Some(Value::Bool(true)));
    assert_eq!(store.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu")));
}
|
||||
|
||||
/// Strict (non-easy) SafeMode->LMDB migration fails with `SourceEmpty`
/// when the source environment contains no data.
#[test]
#[should_panic(expected = "migrated: SourceEmpty")]
fn test_migrator_safe_to_lmdb_1() {
    let root = Builder::new().prefix("test_migrate_safe_to_lmdb").tempdir().expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");

    let src_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
    let dst_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
    // Source was never populated, so the expect() panics with SourceEmpty.
    Migrator::migrate_safe_mode_to_lmdb(&src_env, &dst_env).expect("migrated");
}
|
||||
|
||||
/// Strict (non-easy) SafeMode->LMDB migration fails with
/// `DestinationNotEmpty` when the destination already contains data.
#[test]
#[should_panic(expected = "migrated: DestinationNotEmpty")]
fn test_migrator_safe_to_lmdb_2() {
    let root = Builder::new().prefix("test_migrate_safe_to_lmdb").tempdir().expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");

    let src_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
    populate_store!(&src_env);
    let dst_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
    // Pre-populating the destination triggers the DestinationNotEmpty panic.
    populate_store!(&dst_env);
    Migrator::migrate_safe_mode_to_lmdb(&src_env, &dst_env).expect("migrated");
}
|
||||
|
||||
/// Strict (non-easy) SafeMode->LMDB migration succeeds when the source is
/// populated and the destination is empty; the migrated values are readable
/// from the destination.
#[test]
fn test_migrator_safe_to_lmdb_3() {
    let root = Builder::new().prefix("test_migrate_safe_to_lmdb").tempdir().expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");

    let src_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
    populate_store!(&src_env);
    let dst_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
    Migrator::migrate_safe_mode_to_lmdb(&src_env, &dst_env).expect("migrated");

    let store = dst_env.open_single("store", StoreOptions::default()).expect("opened");
    let reader = dst_env.read().expect("reader");
    assert_eq!(store.get(&reader, "foo").expect("read"), Some(Value::I64(1234)));
    assert_eq!(store.get(&reader, "bar").expect("read"), Some(Value::Bool(true)));
    assert_eq!(store.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu")));
}
|
Diff between files not shown because of its large size
Load diff
|
@ -0,0 +1,83 @@
|
|||
// Copyright 2018-2019 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
#![cfg(feature = "db-int-key")]
|
||||
#![allow(clippy::many_single_char_names)]
|
||||
|
||||
use std::fs;
|
||||
|
||||
use serde_derive::Serialize;
|
||||
use tempfile::Builder;
|
||||
|
||||
use rkv::{
|
||||
backend::Lmdb,
|
||||
PrimitiveInt,
|
||||
Rkv,
|
||||
StoreOptions,
|
||||
Value,
|
||||
};
|
||||
|
||||
/// Exercise integer-keyed stores: first with the built-in u32 key variant,
/// then with custom newtype keys (i32, u16, u64) made usable by deriving
/// Serialize and implementing `PrimitiveInt`. Each key type is checked at
/// both extremes of its range.
#[test]
fn test_integer_keys() {
    let root = Builder::new().prefix("test_integer_keys").tempdir().expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");

    let k = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
    let s = k.open_integer("s", StoreOptions::create()).expect("open");

    // Writes a value under $key and checks it is visible both through the
    // uncommitted writer and through a fresh reader after commit.
    macro_rules! test_integer_keys {
        ($store:expr, $key:expr) => {{
            let mut writer = k.write().expect("writer");

            $store.put(&mut writer, $key, &Value::Str("hello!")).expect("write");
            assert_eq!($store.get(&writer, $key).expect("read"), Some(Value::Str("hello!")));
            writer.commit().expect("committed");

            let reader = k.read().expect("reader");
            assert_eq!($store.get(&reader, $key).expect("read"), Some(Value::Str("hello!")));
        }};
    }

    // The integer module provides only the u32 integer key variant
    // of IntegerStore, so we can use it without further ado.
    test_integer_keys!(s, std::u32::MIN);
    test_integer_keys!(s, std::u32::MAX);

    // If you want to use another integer key variant, you need to implement
    // a newtype, implement PrimitiveInt, and implement or derive Serialize
    // for it. Here we do so for the i32 type.

    // DANGER! Doing this enables you to open a store with multiple,
    // different integer key types, which may result in unexpected behavior.
    // Make sure you know what you're doing!

    let t = k.open_integer("s", StoreOptions::create()).expect("open");

    #[derive(Serialize)]
    struct I32(i32);
    impl PrimitiveInt for I32 {}
    test_integer_keys!(t, I32(std::i32::MIN));
    test_integer_keys!(t, I32(std::i32::MAX));

    let u = k.open_integer("s", StoreOptions::create()).expect("open");

    #[derive(Serialize)]
    struct U16(u16);
    impl PrimitiveInt for U16 {}
    test_integer_keys!(u, U16(std::u16::MIN));
    test_integer_keys!(u, U16(std::u16::MAX));

    let v = k.open_integer("s", StoreOptions::create()).expect("open");

    #[derive(Serialize)]
    struct U64(u64);
    impl PrimitiveInt for U64 {}
    test_integer_keys!(v, U64(std::u64::MIN));
    test_integer_keys!(v, U64(std::u64::MAX));
}
|
|
@ -0,0 +1,136 @@
|
|||
// Copyright 2018-2019 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
use std::{
|
||||
fs,
|
||||
sync::Arc,
|
||||
};
|
||||
|
||||
use tempfile::Builder;
|
||||
|
||||
use rkv::{
|
||||
backend::{
|
||||
Lmdb,
|
||||
LmdbEnvironment,
|
||||
SafeMode,
|
||||
SafeModeEnvironment,
|
||||
},
|
||||
Rkv,
|
||||
};
|
||||
|
||||
/// Test that a manager can be created with simple type inference.
#[test]
#[allow(clippy::let_underscore_lock)]
fn test_simple() {
    type Manager = rkv::Manager<LmdbEnvironment>;

    // Acquiring the singleton's write lock is enough to prove the manager
    // type-checks and initializes for the LMDB backend.
    let _ = Manager::singleton().write().unwrap();
}
|
||||
|
||||
/// Test that a manager can be created with simple type inference.
#[test]
#[allow(clippy::let_underscore_lock)]
fn test_simple_safe() {
    type Manager = rkv::Manager<SafeModeEnvironment>;

    // Acquiring the singleton's write lock is enough to prove the manager
    // type-checks and initializes for the SafeMode backend.
    let _ = Manager::singleton().write().unwrap();
}
|
||||
|
||||
/// Test that a shared Rkv instance can be created with simple type inference.
#[test]
fn test_simple_2() {
    type Manager = rkv::Manager<LmdbEnvironment>;

    let root = Builder::new().prefix("test_simple_2").tempdir().expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");

    // get_or_create lazily constructs the environment via Rkv::new.
    let mut manager = Manager::singleton().write().unwrap();
    let _ = manager.get_or_create(root.path(), Rkv::new::<Lmdb>).unwrap();
}
|
||||
|
||||
/// Test that a shared Rkv instance can be created with simple type inference.
#[test]
fn test_simple_safe_2() {
    type Manager = rkv::Manager<SafeModeEnvironment>;

    let root = Builder::new().prefix("test_simple_safe_2").tempdir().expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");

    // get_or_create lazily constructs the environment via Rkv::new.
    let mut manager = Manager::singleton().write().unwrap();
    let _ = manager.get_or_create(root.path(), Rkv::new::<SafeMode>).unwrap();
}
|
||||
|
||||
/// Test that the manager will return the same Rkv instance each time for each path.
#[test]
fn test_same() {
    type Manager = rkv::Manager<LmdbEnvironment>;

    let root = Builder::new().prefix("test_same").tempdir().expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");

    let p = root.path();
    // No instance is registered for the path until get_or_create runs.
    assert!(Manager::singleton().read().unwrap().get(p).expect("success").is_none());

    let created_arc = Manager::singleton().write().unwrap().get_or_create(p, Rkv::new::<Lmdb>).expect("created");
    let fetched_arc = Manager::singleton().read().unwrap().get(p).expect("success").expect("existed");
    // Pointer equality proves the manager cached and reused the instance.
    assert!(Arc::ptr_eq(&created_arc, &fetched_arc));
}
|
||||
|
||||
/// Test that the manager will return the same Rkv instance each time for each path.
#[test]
fn test_same_safe() {
    type Manager = rkv::Manager<SafeModeEnvironment>;

    let root = Builder::new().prefix("test_same_safe").tempdir().expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");

    let p = root.path();
    // No instance is registered for the path until get_or_create runs.
    assert!(Manager::singleton().read().unwrap().get(p).expect("success").is_none());

    let created_arc = Manager::singleton().write().unwrap().get_or_create(p, Rkv::new::<SafeMode>).expect("created");
    let fetched_arc = Manager::singleton().read().unwrap().get(p).expect("success").expect("existed");
    // Pointer equality proves the manager cached and reused the instance.
    assert!(Arc::ptr_eq(&created_arc, &fetched_arc));
}
|
||||
|
||||
/// Test that the manager will return the same Rkv instance each time for each path.
#[test]
fn test_same_with_capacity() {
    type Manager = rkv::Manager<LmdbEnvironment>;

    let root = Builder::new().prefix("test_same_with_capacity").tempdir().expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");

    // Hold the write lock for the whole test instead of re-locking per call.
    let mut manager = Manager::singleton().write().unwrap();

    let p = root.path();
    assert!(manager.get(p).expect("success").is_none());

    let created_arc = manager.get_or_create_with_capacity(p, 10, Rkv::with_capacity::<Lmdb>).expect("created");
    let fetched_arc = manager.get(p).expect("success").expect("existed");
    // Pointer equality proves the manager cached and reused the instance.
    assert!(Arc::ptr_eq(&created_arc, &fetched_arc));
}
|
||||
|
||||
/// Test that the manager will return the same Rkv instance each time for each path.
#[test]
fn test_same_with_capacity_safe() {
    type Manager = rkv::Manager<SafeModeEnvironment>;

    let root = Builder::new().prefix("test_same_with_capacity_safe").tempdir().expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");

    // Hold the write lock for the whole test instead of re-locking per call.
    let mut manager = Manager::singleton().write().unwrap();

    let p = root.path();
    assert!(manager.get(p).expect("success").is_none());

    let created_arc = manager.get_or_create_with_capacity(p, 10, Rkv::with_capacity::<SafeMode>).expect("created");
    let fetched_arc = manager.get(p).expect("success").expect("existed");
    // Pointer equality proves the manager cached and reused the instance.
    assert!(Arc::ptr_eq(&created_arc, &fetched_arc));
}
|
|
@ -0,0 +1,97 @@
|
|||
// Copyright 2018 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
#![cfg(all(feature = "db-dup-sort", feature = "db-int-key"))]
|
||||
#![allow(clippy::many_single_char_names)]
|
||||
|
||||
use std::fs;
|
||||
|
||||
use serde_derive::Serialize;
|
||||
use tempfile::Builder;
|
||||
|
||||
use rkv::{
|
||||
backend::Lmdb,
|
||||
PrimitiveInt,
|
||||
Rkv,
|
||||
StoreOptions,
|
||||
Value,
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn test_multi_integer_keys() {
|
||||
let root = Builder::new().prefix("test_integer_keys").tempdir().expect("tempdir");
|
||||
fs::create_dir_all(root.path()).expect("dir created");
|
||||
|
||||
let k = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
|
||||
let s = k.open_multi_integer("s", StoreOptions::create()).expect("open");
|
||||
|
||||
macro_rules! test_integer_keys {
|
||||
($store:expr, $key:expr) => {{
|
||||
let mut writer = k.write().expect("writer");
|
||||
|
||||
$store.put(&mut writer, $key, &Value::Str("hello1")).expect("write");
|
||||
$store.put(&mut writer, $key, &Value::Str("hello2")).expect("write");
|
||||
$store.put(&mut writer, $key, &Value::Str("hello3")).expect("write");
|
||||
let vals = $store
|
||||
.get(&writer, $key)
|
||||
.expect("read")
|
||||
.map(|result| result.expect("ok"))
|
||||
.map(|(_, v)| v)
|
||||
.collect::<Vec<Value>>();
|
||||
assert_eq!(vals, vec![Value::Str("hello1"), Value::Str("hello2"), Value::Str("hello3")]);
|
||||
writer.commit().expect("committed");
|
||||
|
||||
let reader = k.read().expect("reader");
|
||||
let vals = $store
|
||||
.get(&reader, $key)
|
||||
.expect("read")
|
||||
.map(|result| result.expect("ok"))
|
||||
.map(|(_, v)| v)
|
||||
.collect::<Vec<Value>>();
|
||||
assert_eq!(vals, vec![Value::Str("hello1"), Value::Str("hello2"), Value::Str("hello3")]);
|
||||
}};
|
||||
}
|
||||
|
||||
// The integer module provides only the u32 integer key variant
|
||||
// of IntegerStore, so we can use it without further ado.
|
||||
test_integer_keys!(s, std::u32::MIN);
|
||||
test_integer_keys!(s, std::u32::MAX);
|
||||
|
||||
// If you want to use another integer key variant, you need to implement
|
||||
// a newtype, implement PrimitiveInt, and implement or derive Serialize
|
||||
// for it. Here we do so for the i32 type.
|
||||
|
||||
// DANGER! Doing this enables you to open a store with multiple,
|
||||
// different integer key types, which may result in unexpected behavior.
|
||||
// Make sure you know what you're doing!
|
||||
|
||||
let t = k.open_multi_integer("s", StoreOptions::create()).expect("open");
|
||||
|
||||
#[derive(Serialize)]
|
||||
struct I32(i32);
|
||||
impl PrimitiveInt for I32 {}
|
||||
test_integer_keys!(t, I32(std::i32::MIN));
|
||||
test_integer_keys!(t, I32(std::i32::MAX));
|
||||
|
||||
let u = k.open_multi_integer("s", StoreOptions::create()).expect("open");
|
||||
|
||||
#[derive(Serialize)]
|
||||
struct U16(u16);
|
||||
impl PrimitiveInt for U16 {}
|
||||
test_integer_keys!(u, U16(std::u16::MIN));
|
||||
test_integer_keys!(u, U16(std::u16::MAX));
|
||||
|
||||
let v = k.open_multi_integer("s", StoreOptions::create()).expect("open");
|
||||
|
||||
#[derive(Serialize)]
|
||||
struct U64(u64);
|
||||
impl PrimitiveInt for U64 {}
|
||||
test_integer_keys!(v, U64(std::u64::MIN));
|
||||
test_integer_keys!(v, U64(std::u64::MAX));
|
||||
}
|
|
@ -0,0 +1,132 @@
|
|||
// Copyright 2018-2019 Mozilla
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
#![cfg(feature = "db-dup-sort")]
|
||||
|
||||
use std::fs;
|
||||
|
||||
use tempfile::Builder;
|
||||
|
||||
use rkv::{
|
||||
backend::{
|
||||
Lmdb,
|
||||
LmdbDatabase,
|
||||
LmdbRoCursor,
|
||||
LmdbRwTransaction,
|
||||
},
|
||||
Readable,
|
||||
Rkv,
|
||||
StoreOptions,
|
||||
Value,
|
||||
Writer,
|
||||
};
|
||||
|
||||
/// Consider a struct like this:
|
||||
/// struct Sample {
|
||||
/// id: u64,
|
||||
/// value: String,
|
||||
/// date: String,
|
||||
/// }
|
||||
/// We would like to index all of the fields so that we can search for the struct not only
|
||||
/// by ID but also by value and date. When we index the fields individually in their own
|
||||
/// tables, it is important that we run all operations within a single transaction to
|
||||
/// ensure coherence of the indices.
|
||||
/// This test features helper functions for reading and writing the parts of the struct.
|
||||
/// Note that the reader functions take `Readable` because they might run within a Read
|
||||
/// Transaction or a Write Transaction. The test demonstrates fetching values via both.
|
||||
|
||||
type SingleStore = rkv::SingleStore<LmdbDatabase>;
|
||||
type MultiStore = rkv::MultiStore<LmdbDatabase>;
|
||||
|
||||
#[test]
|
||||
fn read_many() {
|
||||
let root = Builder::new().prefix("test_txns").tempdir().expect("tempdir");
|
||||
fs::create_dir_all(root.path()).expect("dir created");
|
||||
let k = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
|
||||
let samplestore = k.open_single("s", StoreOptions::create()).expect("open");
|
||||
let datestore = k.open_multi("m", StoreOptions::create()).expect("open");
|
||||
let valuestore = k.open_multi("m", StoreOptions::create()).expect("open");
|
||||
|
||||
{
|
||||
let mut writer = k.write().expect("env write lock");
|
||||
|
||||
for id in 0..30_u64 {
|
||||
let value = format!("value{}", id);
|
||||
let date = format!("2019-06-{}", id);
|
||||
put_id_field(&mut writer, datestore, &date, id);
|
||||
put_id_field(&mut writer, valuestore, &value, id);
|
||||
put_sample(&mut writer, samplestore, id, &value);
|
||||
}
|
||||
|
||||
// now we read in the same transaction
|
||||
for id in 0..30_u64 {
|
||||
let value = format!("value{}", id);
|
||||
let date = format!("2019-06-{}", id);
|
||||
let ids = get_ids_by_field(&writer, datestore, &date);
|
||||
let ids2 = get_ids_by_field(&writer, valuestore, &value);
|
||||
let samples = get_samples(&writer, samplestore, &ids);
|
||||
let samples2 = get_samples(&writer, samplestore, &ids2);
|
||||
println!("{:?}, {:?}", samples, samples2);
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
let reader = k.read().expect("env read lock");
|
||||
for id in 0..30_u64 {
|
||||
let value = format!("value{}", id);
|
||||
let date = format!("2019-06-{}", id);
|
||||
let ids = get_ids_by_field(&reader, datestore, &date);
|
||||
let ids2 = get_ids_by_field(&reader, valuestore, &value);
|
||||
let samples = get_samples(&reader, samplestore, &ids);
|
||||
let samples2 = get_samples(&reader, samplestore, &ids2);
|
||||
println!("{:?}, {:?}", samples, samples2);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn get_ids_by_field<'t, T>(txn: &'t T, store: MultiStore, field: &'t str) -> Vec<u64>
|
||||
where
|
||||
T: Readable<'t, Database = LmdbDatabase, RoCursor = LmdbRoCursor<'t>>,
|
||||
{
|
||||
store
|
||||
.get(txn, field)
|
||||
.expect("get iterator")
|
||||
.map(|id| {
|
||||
match id.expect("field") {
|
||||
(_, Value::U64(id)) => id,
|
||||
_ => panic!("getting value in iter"),
|
||||
}
|
||||
})
|
||||
.collect::<Vec<u64>>()
|
||||
}
|
||||
|
||||
fn get_samples<'t, T>(txn: &'t T, samplestore: SingleStore, ids: &[u64]) -> Vec<String>
|
||||
where
|
||||
T: Readable<'t, Database = LmdbDatabase, RoCursor = LmdbRoCursor<'t>>,
|
||||
{
|
||||
ids.iter()
|
||||
.map(|id| {
|
||||
let bytes = id.to_be_bytes();
|
||||
match samplestore.get(txn, &bytes).expect("fetch sample") {
|
||||
Some(Value::Str(sample)) => String::from(sample),
|
||||
Some(_) => panic!("wrong type"),
|
||||
None => panic!("no sample for this id!"),
|
||||
}
|
||||
})
|
||||
.collect::<Vec<String>>()
|
||||
}
|
||||
|
||||
fn put_sample(txn: &mut Writer<LmdbRwTransaction>, samplestore: SingleStore, id: u64, value: &str) {
|
||||
let idbytes = id.to_be_bytes();
|
||||
samplestore.put(txn, &idbytes, &Value::Str(value)).expect("put id");
|
||||
}
|
||||
|
||||
fn put_id_field(txn: &mut Writer<LmdbRwTransaction>, store: MultiStore, field: &str, id: u64) {
|
||||
store.put(txn, field, &Value::U64(id)).expect("put id");
|
||||
}
|
Различия файлов скрыты, потому что одна или несколько строк слишком длинны
|
@ -333,7 +333,7 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "rkv"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"arrayref",
|
||||
"bincode",
|
||||
|
|
|
@ -13,7 +13,7 @@
|
|||
[package]
|
||||
edition = "2018"
|
||||
name = "rkv"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
authors = ["Richard Newman <rnewman@twinql.com>", "Nan Jiang <najiang@mozilla.com>", "Myk Melez <myk@mykzilla.org>", "Victor Porof <vporof@mozilla.com>"]
|
||||
exclude = ["/tests/envs/*"]
|
||||
description = "A simple, humane, typed key-value storage solution"
|
||||
|
|
|
@ -44,7 +44,7 @@ pub struct EnvironmentBuilderImpl {
|
|||
env_path_type: EnvironmentPathType,
|
||||
env_lock_type: EnvironmentLockType,
|
||||
env_db_type: EnvironmentDefaultDbType,
|
||||
make_dir: bool,
|
||||
make_dir_if_needed: bool,
|
||||
}
|
||||
|
||||
impl<'b> BackendEnvironmentBuilder<'b> for EnvironmentBuilderImpl {
|
||||
|
@ -58,7 +58,7 @@ impl<'b> BackendEnvironmentBuilder<'b> for EnvironmentBuilderImpl {
|
|||
env_path_type: EnvironmentPathType::SubDir,
|
||||
env_lock_type: EnvironmentLockType::Lockfile,
|
||||
env_db_type: EnvironmentDefaultDbType::SingleDatabase,
|
||||
make_dir: false,
|
||||
make_dir_if_needed: false,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -95,11 +95,17 @@ impl<'b> BackendEnvironmentBuilder<'b> for EnvironmentBuilderImpl {
|
|||
self
|
||||
}
|
||||
|
||||
fn set_make_dir_if_needed(&mut self, make_dir: bool) -> &mut Self {
|
||||
self.make_dir = make_dir;
|
||||
fn set_make_dir_if_needed(&mut self, make_dir_if_needed: bool) -> &mut Self {
|
||||
self.make_dir_if_needed = make_dir_if_needed;
|
||||
self
|
||||
}
|
||||
|
||||
fn set_discard_if_corrupted(&mut self, _discard_if_corrupted: bool) -> &mut Self {
|
||||
// Unfortunately, when opening a database, LMDB doesn't handle all the ways it could have
|
||||
// been corrupted. Prefer using the `SafeMode` backend if this is important.
|
||||
unimplemented!();
|
||||
}
|
||||
|
||||
fn open(&self, path: &Path) -> Result<Self::Environment, Self::Error> {
|
||||
match self.env_path_type {
|
||||
EnvironmentPathType::NoSubDir => {
|
||||
|
@ -109,7 +115,7 @@ impl<'b> BackendEnvironmentBuilder<'b> for EnvironmentBuilderImpl {
|
|||
},
|
||||
EnvironmentPathType::SubDir => {
|
||||
if !path.is_dir() {
|
||||
if !self.make_dir {
|
||||
if !self.make_dir_if_needed {
|
||||
return Err(ErrorImpl::UnsuitableEnvironmentPath(path.into()));
|
||||
}
|
||||
fs::create_dir_all(path)?;
|
||||
|
|
|
@ -12,6 +12,7 @@ use std::{
|
|||
borrow::Cow,
|
||||
collections::HashMap,
|
||||
fs,
|
||||
ops::DerefMut,
|
||||
path::{
|
||||
Path,
|
||||
PathBuf,
|
||||
|
@ -54,7 +55,8 @@ pub struct EnvironmentBuilderImpl {
|
|||
max_readers: Option<usize>,
|
||||
max_dbs: Option<usize>,
|
||||
map_size: Option<usize>,
|
||||
make_dir: bool,
|
||||
make_dir_if_needed: bool,
|
||||
discard_if_corrupted: bool,
|
||||
}
|
||||
|
||||
impl<'b> BackendEnvironmentBuilder<'b> for EnvironmentBuilderImpl {
|
||||
|
@ -68,7 +70,8 @@ impl<'b> BackendEnvironmentBuilder<'b> for EnvironmentBuilderImpl {
|
|||
max_readers: None,
|
||||
max_dbs: None,
|
||||
map_size: None,
|
||||
make_dir: false,
|
||||
make_dir_if_needed: false,
|
||||
discard_if_corrupted: false,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -95,8 +98,13 @@ impl<'b> BackendEnvironmentBuilder<'b> for EnvironmentBuilderImpl {
|
|||
self
|
||||
}
|
||||
|
||||
fn set_make_dir_if_needed(&mut self, make_dir: bool) -> &mut Self {
|
||||
self.make_dir = make_dir;
|
||||
fn set_make_dir_if_needed(&mut self, make_dir_if_needed: bool) -> &mut Self {
|
||||
self.make_dir_if_needed = make_dir_if_needed;
|
||||
self
|
||||
}
|
||||
|
||||
fn set_discard_if_corrupted(&mut self, discard_if_corrupted: bool) -> &mut Self {
|
||||
self.discard_if_corrupted = discard_if_corrupted;
|
||||
self
|
||||
}
|
||||
|
||||
|
@ -104,43 +112,65 @@ impl<'b> BackendEnvironmentBuilder<'b> for EnvironmentBuilderImpl {
|
|||
// Technically NO_SUB_DIR should change these checks here, but they're both currently
|
||||
// unimplemented with this storage backend.
|
||||
if !path.is_dir() {
|
||||
if !self.make_dir {
|
||||
if !self.make_dir_if_needed {
|
||||
return Err(ErrorImpl::UnsuitableEnvironmentPath(path.into()));
|
||||
}
|
||||
fs::create_dir_all(path)?;
|
||||
}
|
||||
let mut env = EnvironmentImpl::new(path, self.flags, self.max_readers, self.max_dbs, self.map_size)?;
|
||||
env.read_from_disk()?;
|
||||
env.read_from_disk(self.discard_if_corrupted)?;
|
||||
Ok(env)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct EnvironmentDbs {
|
||||
pub(crate) arena: DatabaseArena,
|
||||
pub(crate) name_map: DatabaseNameMap,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct EnvironmentDbsRefMut<'a> {
|
||||
pub(crate) arena: &'a mut DatabaseArena,
|
||||
pub(crate) name_map: &'a mut DatabaseNameMap,
|
||||
}
|
||||
|
||||
impl<'a> From<&'a mut EnvironmentDbs> for EnvironmentDbsRefMut<'a> {
|
||||
fn from(dbs: &mut EnvironmentDbs) -> EnvironmentDbsRefMut {
|
||||
EnvironmentDbsRefMut {
|
||||
arena: &mut dbs.arena,
|
||||
name_map: &mut dbs.name_map,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct EnvironmentImpl {
|
||||
path: PathBuf,
|
||||
max_dbs: usize,
|
||||
arena: RwLock<DatabaseArena>,
|
||||
dbs: RwLock<DatabaseNameMap>,
|
||||
dbs: RwLock<EnvironmentDbs>,
|
||||
ro_txns: Arc<()>,
|
||||
rw_txns: Arc<()>,
|
||||
}
|
||||
|
||||
impl EnvironmentImpl {
|
||||
fn serialize(&self) -> Result<Vec<u8>, ErrorImpl> {
|
||||
let arena = self.arena.read().map_err(|_| ErrorImpl::EnvPoisonError)?;
|
||||
let dbs = self.dbs.read().map_err(|_| ErrorImpl::EnvPoisonError)?;
|
||||
let data: HashMap<_, _> = dbs.iter().map(|(name, id)| (name, &arena[id.0])).collect();
|
||||
let data: HashMap<_, _> = dbs.name_map.iter().map(|(name, id)| (name, &dbs.arena[id.0])).collect();
|
||||
Ok(bincode::serialize(&data)?)
|
||||
}
|
||||
|
||||
fn deserialize(bytes: &[u8]) -> Result<(DatabaseArena, DatabaseNameMap), ErrorImpl> {
|
||||
fn deserialize(bytes: &[u8], discard_if_corrupted: bool) -> Result<(DatabaseArena, DatabaseNameMap), ErrorImpl> {
|
||||
let mut arena = DatabaseArena::new();
|
||||
let mut dbs = HashMap::new();
|
||||
let data: HashMap<_, _> = bincode::deserialize(&bytes)?;
|
||||
let mut name_map = HashMap::new();
|
||||
let data: HashMap<_, _> = match bincode::deserialize(&bytes) {
|
||||
Err(_) if discard_if_corrupted => Ok(HashMap::new()),
|
||||
result => result,
|
||||
}?;
|
||||
for (name, db) in data {
|
||||
dbs.insert(name, DatabaseImpl(arena.alloc(db)));
|
||||
name_map.insert(name, DatabaseImpl(arena.alloc(db)));
|
||||
}
|
||||
Ok((arena, dbs))
|
||||
Ok((arena, name_map))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -165,14 +195,16 @@ impl EnvironmentImpl {
|
|||
Ok(EnvironmentImpl {
|
||||
path: path.to_path_buf(),
|
||||
max_dbs: max_dbs.unwrap_or(std::usize::MAX),
|
||||
arena: RwLock::new(DatabaseArena::new()),
|
||||
dbs: RwLock::new(HashMap::new()),
|
||||
dbs: RwLock::new(EnvironmentDbs {
|
||||
arena: DatabaseArena::new(),
|
||||
name_map: HashMap::new(),
|
||||
}),
|
||||
ro_txns: Arc::new(()),
|
||||
rw_txns: Arc::new(()),
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) fn read_from_disk(&mut self) -> Result<(), ErrorImpl> {
|
||||
pub(crate) fn read_from_disk(&mut self, discard_if_corrupted: bool) -> Result<(), ErrorImpl> {
|
||||
let mut path = Cow::from(&self.path);
|
||||
if fs::metadata(&path)?.is_dir() {
|
||||
path.to_mut().push(DEFAULT_DB_FILENAME);
|
||||
|
@ -180,9 +212,11 @@ impl EnvironmentImpl {
|
|||
if fs::metadata(&path).is_err() {
|
||||
return Ok(());
|
||||
};
|
||||
let (arena, dbs) = Self::deserialize(&fs::read(&path)?)?;
|
||||
self.arena = RwLock::new(arena);
|
||||
self.dbs = RwLock::new(dbs);
|
||||
let (arena, name_map) = Self::deserialize(&fs::read(&path)?, discard_if_corrupted)?;
|
||||
self.dbs = RwLock::new(EnvironmentDbs {
|
||||
arena,
|
||||
name_map,
|
||||
});
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
@ -195,12 +229,12 @@ impl EnvironmentImpl {
|
|||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) fn dbs(&self) -> Result<RwLockReadGuard<DatabaseArena>, ErrorImpl> {
|
||||
self.arena.read().map_err(|_| ErrorImpl::EnvPoisonError)
|
||||
pub(crate) fn dbs(&self) -> Result<RwLockReadGuard<EnvironmentDbs>, ErrorImpl> {
|
||||
self.dbs.read().map_err(|_| ErrorImpl::EnvPoisonError)
|
||||
}
|
||||
|
||||
pub(crate) fn dbs_mut(&self) -> Result<RwLockWriteGuard<DatabaseArena>, ErrorImpl> {
|
||||
self.arena.write().map_err(|_| ErrorImpl::EnvPoisonError)
|
||||
pub(crate) fn dbs_mut(&self) -> Result<RwLockWriteGuard<EnvironmentDbs>, ErrorImpl> {
|
||||
self.dbs.write().map_err(|_| ErrorImpl::EnvPoisonError)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -215,7 +249,7 @@ impl<'e> BackendEnvironment<'e> for EnvironmentImpl {
|
|||
|
||||
fn get_dbs(&self) -> Result<Vec<Option<String>>, Self::Error> {
|
||||
let dbs = self.dbs.read().map_err(|_| ErrorImpl::EnvPoisonError)?;
|
||||
Ok(dbs.keys().map(|key| key.to_owned()).collect())
|
||||
Ok(dbs.name_map.keys().map(|key| key.to_owned()).collect())
|
||||
}
|
||||
|
||||
fn open_db(&self, name: Option<&str>) -> Result<Self::Database, Self::Error> {
|
||||
|
@ -225,8 +259,8 @@ impl<'e> BackendEnvironment<'e> for EnvironmentImpl {
|
|||
// TOOD: don't reallocate `name`.
|
||||
let key = name.map(String::from);
|
||||
let dbs = self.dbs.read().map_err(|_| ErrorImpl::EnvPoisonError)?;
|
||||
let id = dbs.get(&key).ok_or(ErrorImpl::DbNotFoundError)?;
|
||||
Ok(*id)
|
||||
let db = dbs.name_map.get(&key).ok_or(ErrorImpl::DbNotFoundError)?;
|
||||
Ok(*db)
|
||||
}
|
||||
|
||||
fn create_db(&self, name: Option<&str>, flags: Self::Flags) -> Result<Self::Database, Self::Error> {
|
||||
|
@ -236,11 +270,13 @@ impl<'e> BackendEnvironment<'e> for EnvironmentImpl {
|
|||
// TOOD: don't reallocate `name`.
|
||||
let key = name.map(String::from);
|
||||
let mut dbs = self.dbs.write().map_err(|_| ErrorImpl::EnvPoisonError)?;
|
||||
let mut arena = self.arena.write().map_err(|_| ErrorImpl::EnvPoisonError)?;
|
||||
if dbs.keys().filter_map(|k| k.as_ref()).count() >= self.max_dbs && name != None {
|
||||
if dbs.name_map.keys().filter_map(|k| k.as_ref()).count() >= self.max_dbs && name != None {
|
||||
return Err(ErrorImpl::DbsFull);
|
||||
}
|
||||
let id = dbs.entry(key).or_insert_with(|| DatabaseImpl(arena.alloc(Database::new(Some(flags), None))));
|
||||
let parts = EnvironmentDbsRefMut::from(dbs.deref_mut());
|
||||
let arena = parts.arena;
|
||||
let name_map = parts.name_map;
|
||||
let id = name_map.entry(key).or_insert_with(|| DatabaseImpl(arena.alloc(Database::new(Some(flags), None))));
|
||||
Ok(*id)
|
||||
}
|
||||
|
||||
|
|
|
@ -37,7 +37,7 @@ pub struct RoTransactionImpl<'t> {
|
|||
|
||||
impl<'t> RoTransactionImpl<'t> {
|
||||
pub(crate) fn new(env: &'t EnvironmentImpl, idx: Arc<()>) -> Result<RoTransactionImpl<'t>, ErrorImpl> {
|
||||
let snapshots = env.dbs()?.iter().map(|(id, db)| (DatabaseImpl(id), db.snapshot())).collect();
|
||||
let snapshots = env.dbs()?.arena.iter().map(|(id, db)| (DatabaseImpl(id), db.snapshot())).collect();
|
||||
Ok(RoTransactionImpl {
|
||||
env,
|
||||
snapshots,
|
||||
|
@ -78,7 +78,7 @@ pub struct RwTransactionImpl<'t> {
|
|||
|
||||
impl<'t> RwTransactionImpl<'t> {
|
||||
pub(crate) fn new(env: &'t EnvironmentImpl, idx: Arc<()>) -> Result<RwTransactionImpl<'t>, ErrorImpl> {
|
||||
let snapshots = env.dbs()?.iter().map(|(id, db)| (DatabaseImpl(id), db.snapshot())).collect();
|
||||
let snapshots = env.dbs()?.arena.iter().map(|(id, db)| (DatabaseImpl(id), db.snapshot())).collect();
|
||||
Ok(RwTransactionImpl {
|
||||
env,
|
||||
snapshots,
|
||||
|
@ -144,7 +144,7 @@ impl<'t> BackendRwTransaction for RwTransactionImpl<'t> {
|
|||
let mut dbs = self.env.dbs_mut()?;
|
||||
|
||||
for (id, snapshot) in self.snapshots {
|
||||
let db = dbs.get_mut(id.0).ok_or_else(|| ErrorImpl::DbIsForeignError)?;
|
||||
let db = dbs.arena.get_mut(id.0).ok_or_else(|| ErrorImpl::DbIsForeignError)?;
|
||||
db.replace(snapshot);
|
||||
}
|
||||
|
||||
|
|
|
@ -91,7 +91,9 @@ pub trait BackendEnvironmentBuilder<'b>: Debug + Eq + PartialEq + Copy + Clone {
|
|||
|
||||
fn set_map_size(&mut self, size: usize) -> &mut Self;
|
||||
|
||||
fn set_make_dir_if_needed(&mut self, make_dir: bool) -> &mut Self;
|
||||
fn set_make_dir_if_needed(&mut self, make_dir_if_needed: bool) -> &mut Self;
|
||||
|
||||
fn set_discard_if_corrupted(&mut self, discard_if_corrupted: bool) -> &mut Self;
|
||||
|
||||
fn open(&self, path: &Path) -> Result<Self::Environment, Self::Error>;
|
||||
}
|
||||
|
|
|
@ -30,13 +30,17 @@ use crate::{
|
|||
BackendRwCursorTransaction,
|
||||
SafeModeError,
|
||||
},
|
||||
error::StoreError,
|
||||
error::{
|
||||
CloseError,
|
||||
StoreError,
|
||||
},
|
||||
readwrite::{
|
||||
Reader,
|
||||
Writer,
|
||||
},
|
||||
store::{
|
||||
single::SingleStore,
|
||||
CloseOptions,
|
||||
Options as StoreOptions,
|
||||
},
|
||||
};
|
||||
|
@ -310,15 +314,16 @@ where
|
|||
self.env.set_map_size(size).map_err(Into::into)
|
||||
}
|
||||
|
||||
/// Closes this environment and deletes all its files from disk. Doesn't delete the
|
||||
/// folder used when opening the environment.
|
||||
pub fn close_and_delete(self) -> Result<(), StoreError> {
|
||||
/// Closes this environment and optionally deletes all its files from disk. Doesn't
|
||||
/// delete the folder used when opening the environment.
|
||||
pub fn close(self, options: CloseOptions) -> Result<(), CloseError> {
|
||||
let files = self.env.get_files_on_disk();
|
||||
self.sync(true)?;
|
||||
drop(self);
|
||||
|
||||
for file in files {
|
||||
fs::remove_file(file)?;
|
||||
if options.delete {
|
||||
for file in files {
|
||||
fs::remove_file(file)?;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
|
|
|
@ -131,11 +131,41 @@ impl<T> From<sync::PoisonError<T>> for StoreError {
|
|||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Fail)]
|
||||
pub enum CloseError {
|
||||
#[fail(display = "manager poisoned")]
|
||||
ManagerPoisonError,
|
||||
|
||||
#[fail(display = "close attempted while manager has an environment still open")]
|
||||
EnvironmentStillOpen,
|
||||
|
||||
#[fail(display = "close attempted while an environment not known to the manager is still open")]
|
||||
UnknownEnvironmentStillOpen,
|
||||
|
||||
#[fail(display = "I/O error: {:?}", _0)]
|
||||
IoError(io::Error),
|
||||
}
|
||||
|
||||
impl<T> From<sync::PoisonError<T>> for CloseError {
|
||||
fn from(_: sync::PoisonError<T>) -> CloseError {
|
||||
CloseError::ManagerPoisonError
|
||||
}
|
||||
}
|
||||
|
||||
impl From<io::Error> for CloseError {
|
||||
fn from(e: io::Error) -> CloseError {
|
||||
CloseError::IoError(e)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Fail)]
|
||||
pub enum MigrateError {
|
||||
#[fail(display = "store error: {}", _0)]
|
||||
StoreError(StoreError),
|
||||
|
||||
#[fail(display = "close error: {}", _0)]
|
||||
CloseError(CloseError),
|
||||
|
||||
#[fail(display = "manager poisoned")]
|
||||
ManagerPoisonError,
|
||||
|
||||
|
@ -152,6 +182,12 @@ impl From<StoreError> for MigrateError {
|
|||
}
|
||||
}
|
||||
|
||||
impl From<CloseError> for MigrateError {
|
||||
fn from(e: CloseError) -> MigrateError {
|
||||
MigrateError::CloseError(e)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> From<sync::PoisonError<T>> for MigrateError {
|
||||
fn from(_: sync::PoisonError<T>) -> MigrateError {
|
||||
MigrateError::ManagerPoisonError
|
||||
|
|
|
@ -234,6 +234,7 @@ pub use readwrite::{
|
|||
pub use store::{
|
||||
keys::EncodableKey,
|
||||
single::SingleStore,
|
||||
CloseOptions,
|
||||
Options as StoreOptions,
|
||||
};
|
||||
pub use value::{
|
||||
|
|
|
@ -34,12 +34,17 @@ use crate::{
|
|||
LmdbEnvironment,
|
||||
SafeModeEnvironment,
|
||||
},
|
||||
error::StoreError,
|
||||
error::{
|
||||
CloseError,
|
||||
StoreError,
|
||||
},
|
||||
helpers::canonicalize_path,
|
||||
store::CloseOptions,
|
||||
Rkv,
|
||||
};
|
||||
|
||||
type Result<T> = result::Result<T, StoreError>;
|
||||
type CloseResult<T> = result::Result<T, CloseError>;
|
||||
type SharedRkv<E> = Arc<RwLock<Rkv<E>>>;
|
||||
|
||||
lazy_static! {
|
||||
|
@ -146,10 +151,9 @@ where
|
|||
})
|
||||
}
|
||||
|
||||
/// Tries to close the specified environment and delete all its files from disk.
|
||||
/// Doesn't delete the folder used when opening the environment.
|
||||
/// This will only work if there's no other users of this environment.
|
||||
pub fn try_close_and_delete<'p, P>(&mut self, path: P) -> Result<()>
|
||||
/// Tries to close the specified environment.
|
||||
/// Returns an error when other users of this environment still exist.
|
||||
pub fn try_close<'p, P>(&mut self, path: P, options: CloseOptions) -> CloseResult<()>
|
||||
where
|
||||
P: Into<&'p Path>,
|
||||
{
|
||||
|
@ -159,16 +163,14 @@ where
|
|||
canonicalize_path(path)?
|
||||
};
|
||||
match self.environments.entry(canonical) {
|
||||
Entry::Vacant(_) => {}, // noop
|
||||
Entry::Vacant(_) => Ok(()),
|
||||
Entry::Occupied(e) if Arc::strong_count(e.get()) > 1 => Err(CloseError::EnvironmentStillOpen),
|
||||
Entry::Occupied(e) => {
|
||||
if Arc::strong_count(e.get()) == 1 {
|
||||
if let Ok(env) = Arc::try_unwrap(e.remove()) {
|
||||
env.into_inner()?.close_and_delete()?;
|
||||
}
|
||||
}
|
||||
let env = Arc::try_unwrap(e.remove()).map_err(|_| CloseError::UnknownEnvironmentStillOpen)?;
|
||||
env.into_inner()?.close(options)?;
|
||||
Ok(())
|
||||
},
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -28,8 +28,8 @@
|
|||
//! handling all errors.
|
||||
//! * `easy_migrate_<src>_to_<dst>` which is similar to the above, but ignores the
|
||||
//! migration and doesn't delete any files if the source environment is invalid
|
||||
//! (corrupted), unavailable (path not found or incompatible with configuration), or
|
||||
//! empty (database has no records).
|
||||
//! (corrupted), unavailable (path not accessible or incompatible with configuration),
|
||||
//! or empty (database has no records).
|
||||
//!
|
||||
//! The tool currently has these limitations:
|
||||
//!
|
||||
|
@ -100,15 +100,18 @@ macro_rules! fn_migrator {
|
|||
};
|
||||
|
||||
(open $migrate:tt, $name:tt, $builder:tt, $src_env:ty, $dst_env:ty) => {
|
||||
/// Same as the non `open_*` migration method, but automatically attempts to open the
|
||||
/// source environment. Finally, deletes all of its supporting files if there's no other
|
||||
/// environment open at that path.
|
||||
/// Same as the the `migrate_x_to_y` migration method above, but automatically attempts
|
||||
/// to open the source environment. Finally, deletes all of its supporting files if
|
||||
/// there's no other environment open at that path and the migration succeeded.
|
||||
pub fn $name<F, D>(path: &std::path::Path, build: F, dst_env: D) -> Result<(), MigrateError>
|
||||
where
|
||||
F: FnOnce(crate::backend::$builder) -> crate::backend::$builder,
|
||||
D: std::ops::Deref<Target = Rkv<$dst_env>>,
|
||||
{
|
||||
use crate::backend::*;
|
||||
use crate::{
|
||||
backend::*,
|
||||
CloseOptions,
|
||||
};
|
||||
|
||||
let mut manager = crate::Manager::<$src_env>::singleton().write()?;
|
||||
let mut builder = Rkv::<$src_env>::environment_builder::<$builder>();
|
||||
|
@ -119,24 +122,37 @@ macro_rules! fn_migrator {
|
|||
Migrator::$migrate(src_env.read()?, dst_env)?;
|
||||
|
||||
drop(src_env);
|
||||
manager.try_close_and_delete(path)?;
|
||||
manager.try_close(path, CloseOptions::delete_files_on_disk())?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
};
|
||||
|
||||
(easy $migrate:tt, $name:tt, $src_env:ty, $dst_env:ty) => {
|
||||
/// Same as the `open_*` migration method, but ignores the migration and doesn't delete
|
||||
/// any files if the source environment is invalid (corrupted), unavailable, or empty.
|
||||
/// Same as the `open_and_migrate_x_to_y` migration method above, but ignores the
|
||||
/// migration and doesn't delete any files if the following conditions apply:
|
||||
/// - Source environment is invalid (corrupted), unavailable, or empty.
|
||||
/// - Destination environment is not empty.
|
||||
/// Use this instead of the other migration methods if:
|
||||
/// - You're not concerned by throwing away old data and starting fresh with a new store.
|
||||
/// - You'll never want to overwrite data in the new store from the old store.
|
||||
pub fn $name<D>(path: &std::path::Path, dst_env: D) -> Result<(), MigrateError>
|
||||
where
|
||||
D: std::ops::Deref<Target = Rkv<$dst_env>>,
|
||||
{
|
||||
match Migrator::$migrate(path, |builder| builder, dst_env) {
|
||||
// Source environment is corrupted.
|
||||
Err(crate::MigrateError::StoreError(crate::StoreError::FileInvalid)) => Ok(()),
|
||||
// Path not accessible.
|
||||
Err(crate::MigrateError::StoreError(crate::StoreError::IoError(_))) => Ok(()),
|
||||
// Path accessible but incompatible for configuration.
|
||||
Err(crate::MigrateError::StoreError(crate::StoreError::UnsuitableEnvironmentPath(_))) => Ok(()),
|
||||
// Couldn't close source environment and delete files on disk (e.g. other stores still open).
|
||||
Err(crate::MigrateError::CloseError(_)) => Ok(()),
|
||||
// Nothing to migrate.
|
||||
Err(crate::MigrateError::SourceEmpty) => Ok(()),
|
||||
// Migrating would overwrite.
|
||||
Err(crate::MigrateError::DestinationNotEmpty) => Ok(()),
|
||||
result => result,
|
||||
}?;
|
||||
|
||||
|
|
|
@ -39,3 +39,16 @@ where
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default, Debug, Copy, Clone)]
|
||||
pub struct CloseOptions {
|
||||
pub delete: bool,
|
||||
}
|
||||
|
||||
impl CloseOptions {
|
||||
pub fn delete_files_on_disk() -> CloseOptions {
|
||||
CloseOptions {
|
||||
delete: true,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -18,8 +18,11 @@ use tempfile::Builder;
|
|||
use rkv::{
|
||||
backend::{
|
||||
Lmdb,
|
||||
LmdbEnvironment,
|
||||
SafeMode,
|
||||
SafeModeEnvironment,
|
||||
},
|
||||
Manager,
|
||||
Migrator,
|
||||
Rkv,
|
||||
StoreOptions,
|
||||
|
@ -38,8 +41,8 @@ macro_rules! populate_store {
|
|||
}
|
||||
|
||||
#[test]
|
||||
fn test_simple_migrator_lmdb_to_safe() {
|
||||
let root = Builder::new().prefix("test_simple_migrator_lmdb_to_safe").tempdir().expect("tempdir");
|
||||
fn test_open_migrator_lmdb_to_safe() {
|
||||
let root = Builder::new().prefix("test_open_migrator_lmdb_to_safe").tempdir().expect("tempdir");
|
||||
fs::create_dir_all(root.path()).expect("dir created");
|
||||
|
||||
// Populate source environment and persist to disk.
|
||||
|
@ -92,8 +95,8 @@ fn test_simple_migrator_lmdb_to_safe() {
|
|||
}
|
||||
|
||||
#[test]
|
||||
fn test_simple_migrator_safe_to_lmdb() {
|
||||
let root = Builder::new().prefix("test_simple_migrator_safe_to_lmdb").tempdir().expect("tempdir");
|
||||
fn test_open_migrator_safe_to_lmdb() {
|
||||
let root = Builder::new().prefix("test_open_migrator_safe_to_lmdb").tempdir().expect("tempdir");
|
||||
fs::create_dir_all(root.path()).expect("dir created");
|
||||
|
||||
// Populate source environment and persist to disk.
|
||||
|
@ -140,8 +143,8 @@ fn test_simple_migrator_safe_to_lmdb() {
|
|||
}
|
||||
|
||||
#[test]
|
||||
fn test_migrator_round_trip() {
|
||||
let root = Builder::new().prefix("test_simple_migrator_lmdb_to_safe").tempdir().expect("tempdir");
|
||||
fn test_open_migrator_round_trip() {
|
||||
let root = Builder::new().prefix("test_open_migrator_lmdb_to_safe").tempdir().expect("tempdir");
|
||||
fs::create_dir_all(root.path()).expect("dir created");
|
||||
|
||||
// Populate source environment and persist to disk.
|
||||
|
@ -184,8 +187,8 @@ fn test_migrator_round_trip() {
|
|||
}
|
||||
|
||||
#[test]
|
||||
fn test_migrator_no_dir_1() {
|
||||
let root = Builder::new().prefix("test_migrator_no_dir").tempdir().expect("tempdir");
|
||||
fn test_easy_migrator_no_dir_1() {
|
||||
let root = Builder::new().prefix("test_easy_migrator_no_dir").tempdir().expect("tempdir");
|
||||
fs::create_dir_all(root.path()).expect("dir created");
|
||||
|
||||
// This won't fail with IoError even though the path is a bogus path, because this
|
||||
|
@ -205,8 +208,8 @@ fn test_migrator_no_dir_1() {
|
|||
}
|
||||
|
||||
#[test]
|
||||
fn test_migrator_no_dir_2() {
|
||||
let root = Builder::new().prefix("test_migrator_no_dir").tempdir().expect("tempdir");
|
||||
fn test_easy_migrator_no_dir_2() {
|
||||
let root = Builder::new().prefix("test_easy_migrator_no_dir").tempdir().expect("tempdir");
|
||||
fs::create_dir_all(root.path()).expect("dir created");
|
||||
|
||||
// This won't fail with IoError even though the path is a bogus path, because this
|
||||
|
@ -226,8 +229,8 @@ fn test_migrator_no_dir_2() {
|
|||
}
|
||||
|
||||
#[test]
|
||||
fn test_migrator_invalid_1() {
|
||||
let root = Builder::new().prefix("test_migrator_invalid").tempdir().expect("tempdir");
|
||||
fn test_easy_migrator_invalid_1() {
|
||||
let root = Builder::new().prefix("test_easy_migrator_invalid").tempdir().expect("tempdir");
|
||||
fs::create_dir_all(root.path()).expect("dir created");
|
||||
|
||||
let dbfile = root.path().join("data.mdb");
|
||||
|
@ -250,8 +253,8 @@ fn test_migrator_invalid_1() {
|
|||
}
|
||||
|
||||
#[test]
|
||||
fn test_migrator_invalid_2() {
|
||||
let root = Builder::new().prefix("test_migrator_invalid").tempdir().expect("tempdir");
|
||||
fn test_easy_migrator_invalid_2() {
|
||||
let root = Builder::new().prefix("test_easy_migrator_invalid").tempdir().expect("tempdir");
|
||||
fs::create_dir_all(root.path()).expect("dir created");
|
||||
|
||||
let dbfile = root.path().join("data.safe.bin");
|
||||
|
@ -354,3 +357,115 @@ fn test_migrator_safe_to_lmdb_3() {
|
|||
assert_eq!(store.get(&reader, "bar").expect("read"), Some(Value::Bool(true)));
|
||||
assert_eq!(store.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu")));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_easy_migrator_failed_migration_1() {
|
||||
let root = Builder::new().prefix("test_easy_migrator_failed_migration_1").tempdir().expect("tempdir");
|
||||
fs::create_dir_all(root.path()).expect("dir created");
|
||||
|
||||
let dbfile = root.path().join("data.mdb");
|
||||
fs::write(&dbfile, "bogus").expect("bogus dbfile created");
|
||||
|
||||
// This won't fail with FileInvalid even though the database is a bogus file, because this
|
||||
// is the "easy mode" migration which automatically handles (ignores) this error.
|
||||
let dst_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
|
||||
Migrator::easy_migrate_lmdb_to_safe_mode(root.path(), &dst_env).expect("migrated");
|
||||
|
||||
// Populate destination environment and persist to disk.
|
||||
populate_store!(&dst_env);
|
||||
dst_env.sync(true).expect("synced");
|
||||
|
||||
// Delete bogus file and create a valid source environment in its place.
|
||||
fs::remove_file(&dbfile).expect("bogus dbfile removed");
|
||||
let src_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
|
||||
populate_store!(&src_env);
|
||||
src_env.sync(true).expect("synced");
|
||||
|
||||
// Attempt to migrate again. This should *NOT* fail with DestinationNotEmpty.
|
||||
Migrator::easy_migrate_lmdb_to_safe_mode(root.path(), &dst_env).expect("migrated");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_easy_migrator_failed_migration_2() {
|
||||
let root = Builder::new().prefix("test_easy_migrator_failed_migration_2").tempdir().expect("tempdir");
|
||||
fs::create_dir_all(root.path()).expect("dir created");
|
||||
|
||||
let dbfile = root.path().join("data.safe.bin");
|
||||
fs::write(&dbfile, "bogus").expect("bogus dbfile created");
|
||||
|
||||
// This won't fail with FileInvalid even though the database is a bogus file, because this
|
||||
// is the "easy mode" migration which automatically handles (ignores) this error.
|
||||
let dst_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
|
||||
Migrator::easy_migrate_safe_mode_to_lmdb(root.path(), &dst_env).expect("migrated");
|
||||
|
||||
// Populate destination environment and persist to disk.
|
||||
populate_store!(&dst_env);
|
||||
dst_env.sync(true).expect("synced");
|
||||
|
||||
// Delete bogus file and create a valid source environment in its place.
|
||||
fs::remove_file(&dbfile).expect("bogus dbfile removed");
|
||||
let src_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
|
||||
populate_store!(&src_env);
|
||||
src_env.sync(true).expect("synced");
|
||||
|
||||
// Attempt to migrate again. This should *NOT* fail with DestinationNotEmpty.
|
||||
Migrator::easy_migrate_safe_mode_to_lmdb(root.path(), &dst_env).expect("migrated");
|
||||
}
|
||||
|
||||
fn test_easy_migrator_from_manager_failed_migration_1() {
|
||||
let root = Builder::new().prefix("test_easy_migrator_from_manager_failed_migration_1").tempdir().expect("tempdir");
|
||||
fs::create_dir_all(root.path()).expect("dir created");
|
||||
|
||||
{
|
||||
let mut src_manager = Manager::<LmdbEnvironment>::singleton().write().unwrap();
|
||||
let created_src_arc = src_manager.get_or_create(root.path(), Rkv::new::<Lmdb>).unwrap();
|
||||
let src_env = created_src_arc.read().unwrap();
|
||||
populate_store!(&src_env);
|
||||
src_env.sync(true).expect("synced");
|
||||
}
|
||||
{
|
||||
let mut dst_manager = Manager::<SafeModeEnvironment>::singleton().write().unwrap();
|
||||
let created_dst_arc_1 = dst_manager.get_or_create(root.path(), Rkv::new::<SafeMode>).unwrap();
|
||||
let dst_env_1 = created_dst_arc_1.read().unwrap();
|
||||
populate_store!(&dst_env_1);
|
||||
dst_env_1.sync(true).expect("synced");
|
||||
}
|
||||
|
||||
// Attempt to migrate again in a new env. This should *NOT* fail with DestinationNotEmpty.
|
||||
let dst_manager = Manager::<SafeModeEnvironment>::singleton().read().unwrap();
|
||||
let created_dst_arc_2 = dst_manager.get(root.path()).unwrap().unwrap();
|
||||
let dst_env_2 = created_dst_arc_2.read().unwrap();
|
||||
Migrator::easy_migrate_lmdb_to_safe_mode(root.path(), dst_env_2).expect("migrated");
|
||||
}
|
||||
|
||||
fn test_easy_migrator_from_manager_failed_migration_2() {
|
||||
let root = Builder::new().prefix("test_easy_migrator_from_manager_failed_migration_2").tempdir().expect("tempdir");
|
||||
fs::create_dir_all(root.path()).expect("dir created");
|
||||
|
||||
{
|
||||
let mut src_manager = Manager::<SafeModeEnvironment>::singleton().write().unwrap();
|
||||
let created_src_arc = src_manager.get_or_create(root.path(), Rkv::new::<SafeMode>).unwrap();
|
||||
let src_env = created_src_arc.read().unwrap();
|
||||
populate_store!(&src_env);
|
||||
src_env.sync(true).expect("synced");
|
||||
}
|
||||
{
|
||||
let mut dst_manager = Manager::<LmdbEnvironment>::singleton().write().unwrap();
|
||||
let created_dst_arc_1 = dst_manager.get_or_create(root.path(), Rkv::new::<Lmdb>).unwrap();
|
||||
let dst_env_1 = created_dst_arc_1.read().unwrap();
|
||||
populate_store!(&dst_env_1);
|
||||
dst_env_1.sync(true).expect("synced");
|
||||
}
|
||||
|
||||
// Attempt to migrate again in a new env. This should *NOT* fail with DestinationNotEmpty.
|
||||
let dst_manager = Manager::<LmdbEnvironment>::singleton().read().unwrap();
|
||||
let created_dst_arc_2 = dst_manager.get(root.path()).unwrap().unwrap();
|
||||
let dst_env_2 = created_dst_arc_2.read().unwrap();
|
||||
Migrator::easy_migrate_safe_mode_to_lmdb(root.path(), dst_env_2).expect("migrated");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_easy_migrator_from_manager_failed_migration() {
|
||||
test_easy_migrator_from_manager_failed_migration_1();
|
||||
test_easy_migrator_from_manager_failed_migration_2();
|
||||
}
|
||||
|
|
|
@ -17,12 +17,16 @@ use tempfile::Builder;
|
|||
|
||||
use rkv::{
|
||||
backend::{
|
||||
BackendEnvironmentBuilder,
|
||||
Lmdb,
|
||||
LmdbEnvironment,
|
||||
SafeMode,
|
||||
SafeModeEnvironment,
|
||||
},
|
||||
CloseOptions,
|
||||
Rkv,
|
||||
StoreOptions,
|
||||
Value,
|
||||
};
|
||||
|
||||
/// Test that a manager can be created with simple type inference.
|
||||
|
@ -134,3 +138,120 @@ fn test_same_with_capacity_safe() {
|
|||
let fetched_arc = manager.get(p).expect("success").expect("existed");
|
||||
assert!(Arc::ptr_eq(&created_arc, &fetched_arc));
|
||||
}
|
||||
|
||||
/// Some storage drivers are able to discard when the database is corrupted at runtime.
|
||||
/// Test how these managers can discard corrupted databases and re-open.
|
||||
#[test]
|
||||
fn test_safe_mode_corrupt_while_open_1() {
|
||||
type Manager = rkv::Manager<SafeModeEnvironment>;
|
||||
|
||||
let root = Builder::new().prefix("test_safe_mode_corrupt_while_open_1").tempdir().expect("tempdir");
|
||||
fs::create_dir_all(root.path()).expect("dir created");
|
||||
|
||||
// Create environment.
|
||||
let mut manager = Manager::singleton().write().unwrap();
|
||||
let shared_env = manager.get_or_create(root.path(), Rkv::new::<SafeMode>).expect("created");
|
||||
let env = shared_env.read().unwrap();
|
||||
|
||||
// Write some data.
|
||||
let store = env.open_single("store", StoreOptions::create()).expect("opened");
|
||||
let mut writer = env.write().expect("writer");
|
||||
store.put(&mut writer, "foo", &Value::I64(1234)).expect("wrote");
|
||||
store.put(&mut writer, "bar", &Value::Bool(true)).expect("wrote");
|
||||
store.put(&mut writer, "baz", &Value::Str("héllo, yöu")).expect("wrote");
|
||||
writer.commit().expect("committed");
|
||||
env.sync(true).expect("synced");
|
||||
|
||||
// Verify it was flushed to disk.
|
||||
let mut safebin = root.path().to_path_buf();
|
||||
safebin.push("data.safe.bin");
|
||||
assert!(safebin.exists());
|
||||
|
||||
// Oops, corruption.
|
||||
fs::write(&safebin, "bogus").expect("dbfile corrupted");
|
||||
|
||||
// Close everything.
|
||||
drop(env);
|
||||
drop(shared_env);
|
||||
manager.try_close(root.path(), CloseOptions::default()).expect("closed without deleting");
|
||||
assert!(manager.get(root.path()).expect("success").is_none());
|
||||
|
||||
// Recreating environment fails.
|
||||
manager.get_or_create(root.path(), Rkv::new::<SafeMode>).expect_err("not created");
|
||||
assert!(manager.get(root.path()).expect("success").is_none());
|
||||
|
||||
// But we can use a builder and pass `discard_if_corrupted` to deal with it.
|
||||
let mut builder = Rkv::environment_builder::<SafeMode>();
|
||||
builder.set_discard_if_corrupted(true);
|
||||
manager.get_or_create_from_builder(root.path(), builder, Rkv::from_builder::<SafeMode>).expect("created");
|
||||
assert!(manager.get(root.path()).expect("success").is_some());
|
||||
}
|
||||
|
||||
/// Some storage drivers are able to recover when the database is corrupted at runtime.
|
||||
/// Test how these managers can recover corrupted databases while open.
|
||||
#[test]
|
||||
fn test_safe_mode_corrupt_while_open_2() {
|
||||
type Manager = rkv::Manager<SafeModeEnvironment>;
|
||||
|
||||
let root = Builder::new().prefix("test_safe_mode_corrupt_while_open_2").tempdir().expect("tempdir");
|
||||
fs::create_dir_all(root.path()).expect("dir created");
|
||||
|
||||
// Create environment.
|
||||
let mut manager = Manager::singleton().write().unwrap();
|
||||
let shared_env = manager.get_or_create(root.path(), Rkv::new::<SafeMode>).expect("created");
|
||||
let env = shared_env.read().unwrap();
|
||||
|
||||
// Write some data.
|
||||
let store = env.open_single("store", StoreOptions::create()).expect("opened");
|
||||
let mut writer = env.write().expect("writer");
|
||||
store.put(&mut writer, "foo", &Value::I64(1234)).expect("wrote");
|
||||
store.put(&mut writer, "bar", &Value::Bool(true)).expect("wrote");
|
||||
store.put(&mut writer, "baz", &Value::Str("héllo, yöu")).expect("wrote");
|
||||
writer.commit().expect("committed");
|
||||
env.sync(true).expect("synced");
|
||||
|
||||
// Verify it was flushed to disk.
|
||||
let mut safebin = root.path().to_path_buf();
|
||||
safebin.push("data.safe.bin");
|
||||
assert!(safebin.exists());
|
||||
|
||||
// Oops, corruption.
|
||||
fs::write(&safebin, "bogus").expect("dbfile corrupted");
|
||||
|
||||
// Reading still works. Magic.
|
||||
let store = env.open_single("store", StoreOptions::default()).expect("opened");
|
||||
let reader = env.read().expect("reader");
|
||||
assert_eq!(store.get(&reader, "foo").expect("read"), Some(Value::I64(1234)));
|
||||
assert_eq!(store.get(&reader, "bar").expect("read"), Some(Value::Bool(true)));
|
||||
assert_eq!(store.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu")));
|
||||
reader.abort();
|
||||
|
||||
// Writing still works, dbfile will be un-corrupted.
|
||||
let store = env.open_single("store", StoreOptions::default()).expect("opened");
|
||||
let mut writer = env.write().expect("writer");
|
||||
store.put(&mut writer, "foo2", &Value::I64(5678)).expect("wrote");
|
||||
store.put(&mut writer, "bar2", &Value::Bool(false)).expect("wrote");
|
||||
store.put(&mut writer, "baz2", &Value::Str("byé, yöu")).expect("wrote");
|
||||
writer.commit().expect("committed");
|
||||
env.sync(true).expect("synced");
|
||||
|
||||
// Close everything.
|
||||
drop(env);
|
||||
drop(shared_env);
|
||||
manager.try_close(root.path(), CloseOptions::default()).expect("closed without deleting");
|
||||
assert!(manager.get(root.path()).expect("success").is_none());
|
||||
|
||||
// Recreate environment.
|
||||
let shared_env = manager.get_or_create(root.path(), Rkv::new::<SafeMode>).expect("created");
|
||||
let env = shared_env.read().unwrap();
|
||||
|
||||
// Verify that the dbfile is not corrupted.
|
||||
let store = env.open_single("store", StoreOptions::default()).expect("opened");
|
||||
let reader = env.read().expect("reader");
|
||||
assert_eq!(store.get(&reader, "foo").expect("read"), Some(Value::I64(1234)));
|
||||
assert_eq!(store.get(&reader, "bar").expect("read"), Some(Value::Bool(true)));
|
||||
assert_eq!(store.get(&reader, "baz").expect("read"), Some(Value::Str("héllo, yöu")));
|
||||
assert_eq!(store.get(&reader, "foo2").expect("read"), Some(Value::I64(5678)));
|
||||
assert_eq!(store.get(&reader, "bar2").expect("read"), Some(Value::Bool(false)));
|
||||
assert_eq!(store.get(&reader, "baz2").expect("read"), Some(Value::Str("byé, yöu")));
|
||||
}
|
||||
|
|
Загрузка…
Ссылка в новой задаче