chore: checkpoint before Python removal

This commit is contained in:
2026-03-26 22:33:59 +00:00
parent 683cec9307
commit e568ddf82a
29972 changed files with 11269302 additions and 2 deletions

View File

@@ -0,0 +1 @@
{"files":{".cargo_vcs_info.json":"0cc26572b740506a14fbb4922d6f8400ce5159af36f66dd5576980195eec6c22","Cargo.toml":"a6042603541ecb91ec1f5c158679c0b47090462434e2a53e65f78395994f223f","Cargo.toml.orig":"f4aa7137c73c73c175cf68cbbf78eca76082f60a4d1612d9bdfd6bba5661b9d6","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"abf3dae8d5ccf7693234cb0101a205f844959366bc906a1a5b53973a73a3054c","RELEASES.md":"80b60df30e92e913d710e558f1a2de0baa8a3e49b242c66bc1a1a18de5ff4618","benches/bigint.rs":"0943fcfdc290db028bc37d63c118284c79681a52a62311cc036da80d3b9ac390","benches/factorial.rs":"ed1d276a780e7e5fe79121b941c22a00c2854dbf92fd8a5372619853ba0c13b7","benches/gcd.rs":"3cc1a3356f680a6fa625f0ece0c8dd778f4091a53a31177f2870ef9a6c858c7d","benches/rng/mod.rs":"38144fc8283955db4be72a1533328fded98986d6f9d0bc7da0b306f7d4b5ca43","benches/roots.rs":"b31846852a7215c26df228940f2e469aff32fa8805eccc2b5ee5e7280ef0eeb4","benches/shootout-pidigits.rs":"c2a48133f5b679928f7e3f4e764c78aaa8c5b811f58b86fe57fae8c63cb07136","src/bigint.rs":"e5735688e1345396c21a7f5e8328bcdf77f5a0c7068028caa74063c68a3e4ef7","src/bigint/addition.rs":"cdc28e59a5dc6fe7dbad2d8d9514b49fe1bea061e82ebefe89c518f448dc8ba9","src/bigint/arbitrary.rs":"9fb1f7f8bbc7a792d19d4c30ed2671ed2d73847d57e5f65380ae31e5db0146c6","src/bigint/bits.rs":"3d696d43bcd336690ce1412af75a38d490fd2af745c2efc7cb2fc51bace07de8","src/bigint/convert.rs":"1463ecd5e39c938ae5b8e2b685d69a85c2e795c1fa96531b2192abb11ccdf638","src/bigint/division.rs":"dea89b4efb06b77bbbf07036c454d52f4d026658bcbacad1d15302c93d61b3dd","src/bigint/multiplication.rs":"c262f732b86cc36804895df21e5ea5221944cadc1fca7903ff75a26102ba70f1","src/bigint/power.rs":"a3823654f022014c5ce2dc2fd97afa6696589d32a54cc4ec65e6c113e8613672","src/bigint/serde.rs":"a7275341ec518f27f4d955e95b6e302b786944e9462bf0c17c7087d073212943","src/bigint/shift.rs":"3f28bca2d52621133cdf567290645d30a746e66
3d8dacea29c172b5ed7ff3538","src/bigint/subtraction.rs":"c2aa7e0fbfc7747099d0eec77efdedaa8d79cff88b1587d640d6b1c3c7dc0e4a","src/bigrand.rs":"115ab17de1c909b22bf20374b31cf5b1e9b28cd4914bf8cd6852375c35d6a027","src/biguint.rs":"bf69f80a59b528e0743390663f9a6a8f6d3edc8643a65c8d0f7f01a8f434f478","src/biguint/addition.rs":"1429575654102731024259da7d5586a4db44387ae86c1b170083c5565b37b410","src/biguint/arbitrary.rs":"3492432d1b5e9f851ce120549bf82e89ba2b6c51f584f8d9ac6782fe10fb58dc","src/biguint/bits.rs":"509036c6c6cb083d4568f92ac852cf5f12510f98c4547d47a80e3db4282a159e","src/biguint/convert.rs":"85cdb3e60856d281f45dff602960666fe44af0ab54ba2ce26cfa4018a9309c60","src/biguint/division.rs":"13ed075f244510eb0354b899b8b9d99216d202bd4eae445ee18a444e06dd708b","src/biguint/iter.rs":"6f843751c93520817a182a2a335001ff221478499eb765d09a0a6f19ba0f7f97","src/biguint/monty.rs":"68851ce7542acdc180431905668684dcadf7d5b422073c6d8acc418ceb86f5d0","src/biguint/multiplication.rs":"e3908aecda0fbbf8c97c96d1d9b24ff9bbe4e09df3b15e460e00c73ecf1c0e4e","src/biguint/power.rs":"4740974546f85da802f7fee4a7c595a8d1b46611914130237cd26c20c268db84","src/biguint/serde.rs":"f4d4e6f2d56c9bab95d8b178682ebeba88965a36bcff42ccade3334712c9977a","src/biguint/shift.rs":"4962ff0d8d0371e7ab39b00a9fc29907e5cd37b9819f61d5ef82ff13b0f55071","src/biguint/subtraction.rs":"b124b9a12bdb67cadb98050feba8cd04fcd22e985c2f994b7c473d38b94f4821","src/lib.rs":"70493e8293b769076d6a16df962a4913b74aa1925fe8bd448865da097ee2dbe1","src/macros.rs":"ab2a5b6e39538f62fdf39f6f3695e9f2f532715f40109795a787aadbb2b8ea9a","tests/bigint.rs":"929422608f842cfb0bdd5117f5af74c954704d3c84df7da46b0eea1fdedd5ba8","tests/bigint_bitwise.rs":"a69d59c3eb07867f9da66ad9a8ae101a15366f88fb1f72f2015a9fe4de3518db","tests/bigint_scalar.rs":"a87e801e370686985d44e1f020c69fceca72b9f048e0f7301d2b8d38469e5636","tests/biguint.rs":"61fb3cb51ab2102330225adbfbbef9934defc23be9ab04e4c09129cd67b6f380","tests/biguint_scalar.rs":"b09cda9d4fe6ec519e93282653f69b57d70db73b9cb59c0ea5cd0861ca2
de266","tests/consts/mod.rs":"f077d1aea22895a29df4e1391b40cc983cde9389f97e5b30923765f61dc3d017","tests/fuzzed.rs":"f60a84c446ea2f45d87eb4ee64682ea63fdef05bc74f482739d4e968960e8f4e","tests/macros/mod.rs":"1a8f9f015e5caaac60ce9ccff01a75ae489801c3ede6e7b9b3c5079b6efefc9c","tests/modpow.rs":"f4c81111308d63876ed02229cbab50bec9bac082bfa71f481120e46b5d7e5560","tests/roots.rs":"1c1576309eef9a6eb7ac8db4358fcf1d5b55403deaf27fd40ba2aa61b9567c87"},"package":"a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9"}

View File

@@ -0,0 +1,6 @@
{
"git": {
"sha1": "a25836ec6c341d1aa40c97335842f330b6a62911"
},
"path_in_vcs": ""
}

107
vendor/num-bigint/Cargo.toml vendored Normal file
View File

@@ -0,0 +1,107 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2021"
rust-version = "1.60"
name = "num-bigint"
version = "0.4.6"
authors = ["The Rust Project Developers"]
exclude = [
"/ci/*",
"/.github/*",
]
description = "Big integer implementation for Rust"
homepage = "https://github.com/rust-num/num-bigint"
documentation = "https://docs.rs/num-bigint"
readme = "README.md"
keywords = [
"mathematics",
"numerics",
"bignum",
]
categories = [
"algorithms",
"data-structures",
"science",
]
license = "MIT OR Apache-2.0"
repository = "https://github.com/rust-num/num-bigint"
[package.metadata.docs.rs]
features = [
"std",
"serde",
"rand",
"quickcheck",
"arbitrary",
]
rustdoc-args = [
"--cfg",
"docsrs",
]
[[bench]]
name = "bigint"
[[bench]]
name = "factorial"
[[bench]]
name = "gcd"
[[bench]]
name = "roots"
[[bench]]
name = "shootout-pidigits"
harness = false
[dependencies.arbitrary]
version = "1"
optional = true
default-features = false
[dependencies.num-integer]
version = "0.1.46"
features = ["i128"]
default-features = false
[dependencies.num-traits]
version = "0.2.18"
features = ["i128"]
default-features = false
[dependencies.quickcheck]
version = "1"
optional = true
default-features = false
[dependencies.rand]
version = "0.8"
optional = true
default-features = false
[dependencies.serde]
version = "1.0"
optional = true
default-features = false
[features]
arbitrary = ["dep:arbitrary"]
default = ["std"]
quickcheck = ["dep:quickcheck"]
rand = ["dep:rand"]
serde = ["dep:serde"]
std = [
"num-integer/std",
"num-traits/std",
]

201
vendor/num-bigint/LICENSE-APACHE vendored Normal file
View File

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

25
vendor/num-bigint/LICENSE-MIT vendored Normal file
View File

@@ -0,0 +1,25 @@
Copyright (c) 2014 The Rust Project Developers
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

84
vendor/num-bigint/README.md vendored Normal file
View File

@@ -0,0 +1,84 @@
# num-bigint
[![crate](https://img.shields.io/crates/v/num-bigint.svg)](https://crates.io/crates/num-bigint)
[![documentation](https://docs.rs/num-bigint/badge.svg)](https://docs.rs/num-bigint)
[![minimum rustc 1.60](https://img.shields.io/badge/rustc-1.60+-red.svg)](https://rust-lang.github.io/rfcs/2495-min-rust-version.html)
[![build status](https://github.com/rust-num/num-bigint/workflows/master/badge.svg)](https://github.com/rust-num/num-bigint/actions)
Big integer types for Rust, `BigInt` and `BigUint`.
## Usage
Add this to your `Cargo.toml`:
```toml
[dependencies]
num-bigint = "0.4"
```
## Features
The `std` crate feature is enabled by default, and is mandatory before Rust
1.36 and the stabilized `alloc` crate. If you depend on `num-bigint` with
`default-features = false`, you must manually enable the `std` feature yourself
if your compiler is not new enough.
### Random Generation
`num-bigint` supports the generation of random big integers when the `rand`
feature is enabled. To enable it include rand as
```toml
rand = "0.8"
num-bigint = { version = "0.4", features = ["rand"] }
```
Note that you must use the version of `rand` that `num-bigint` is compatible
with: `0.8`.
## Releases
Release notes are available in [RELEASES.md](RELEASES.md).
## Compatibility
The `num-bigint` crate is tested for rustc 1.60 and greater.
## Alternatives
While `num-bigint` strives for good performance in pure Rust code, other
crates may offer better performance with different trade-offs. The following
table offers a brief comparison to a few alternatives.
| Crate | License | Min rustc | Implementation | Features |
| :--------------- | :------------- | :-------- | :------------- | :------- |
| **`num-bigint`** | MIT/Apache-2.0 | 1.60 | pure rust | dynamic width, number theoretical functions |
| [`awint`] | MIT/Apache-2.0 | 1.66 | pure rust | fixed width, heap or stack, concatenation macros |
| [`bnum`] | MIT/Apache-2.0 | 1.65 | pure rust | fixed width, parity with Rust primitives including floats |
| [`crypto-bigint`] | MIT/Apache-2.0 | 1.73 | pure rust | fixed width, stack only |
| [`ibig`] | MIT/Apache-2.0 | 1.49 | pure rust | dynamic width, number theoretical functions |
| [`rug`] | LGPL-3.0+ | 1.65 | bundles [GMP] via [`gmp-mpfr-sys`] | all the features of GMP, MPFR, and MPC |
[`awint`]: https://crates.io/crates/awint
[`bnum`]: https://crates.io/crates/bnum
[`crypto-bigint`]: https://crates.io/crates/crypto-bigint
[`ibig`]: https://crates.io/crates/ibig
[`rug`]: https://crates.io/crates/rug
[GMP]: https://gmplib.org/
[`gmp-mpfr-sys`]: https://crates.io/crates/gmp-mpfr-sys
## License
Licensed under either of
* [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0)
* [MIT license](http://opensource.org/licenses/MIT)
at your option.
### Contribution
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in the work by you, as defined in the Apache-2.0 license, shall be
dual licensed as above, without any additional terms or conditions.

343
vendor/num-bigint/RELEASES.md vendored Normal file
View File

@@ -0,0 +1,343 @@
# Release 0.4.6 (2024-06-27)
- [Fixed compilation on `x86_64-unknown-linux-gnux32`.][312]
**Contributors**: @cuviper, @ralphtandetzky, @yhx-12243
[312]: https://github.com/rust-num/num-bigint/pull/312
# Release 0.4.5 (2024-05-06)
- [Upgrade to 2021 edition, **MSRV 1.60**][292]
- [Add `const ZERO` and implement `num_traits::ConstZero`][298]
- [Add `modinv` methods for the modular inverse][288]
- [Optimize multiplication with imbalanced operands][295]
- [Optimize scalar division on x86 and x86-64][236]
**Contributors**: @cuviper, @joelonsql, @waywardmonkeys
[236]: https://github.com/rust-num/num-bigint/pull/236
[288]: https://github.com/rust-num/num-bigint/pull/288
[292]: https://github.com/rust-num/num-bigint/pull/292
[295]: https://github.com/rust-num/num-bigint/pull/295
[298]: https://github.com/rust-num/num-bigint/pull/298
# Release 0.4.4 (2023-08-22)
- [Implemented `From<bool>` for `BigInt` and `BigUint`.][239]
- [Implemented `num_traits::Euclid` and `CheckedEuclid` for `BigInt` and `BigUint`.][245]
- [Implemented ties-to-even for `BigInt` and `BigUint::to_f32` and `to_f64`.][271]
- [Implemented `num_traits::FromBytes` and `ToBytes` for `BigInt` and `BigUint`.][276]
- Limited pre-allocation from serde size hints against potential OOM.
- Miscellaneous other code cleanups and maintenance tasks.
**Contributors**: @AaronKutch, @archseer, @cuviper, @dramforever, @icecream17,
@icedrocket, @janmarthedal, @jaybosamiya, @OliveIsAWord, @PatrickNorton,
@smoelius, @waywardmonkeys
[239]: https://github.com/rust-num/num-bigint/pull/239
[245]: https://github.com/rust-num/num-bigint/pull/245
[271]: https://github.com/rust-num/num-bigint/pull/271
[276]: https://github.com/rust-num/num-bigint/pull/276
# Release 0.4.3 (2021-11-02)
- [GHSA-v935-pqmr-g8v9]: [Fix unexpected panics in multiplication.][228]
**Contributors**: @arvidn, @cuviper, @guidovranken
[228]: https://github.com/rust-num/num-bigint/pull/228
[GHSA-v935-pqmr-g8v9]: https://github.com/rust-num/num-bigint/security/advisories/GHSA-v935-pqmr-g8v9
# Release 0.4.2 (2021-09-03)
- [Use explicit `Integer::div_ceil` to avoid the new unstable method.][219]
**Contributors**: @catenacyber, @cuviper
[219]: https://github.com/rust-num/num-bigint/pull/219
# Release 0.4.1 (2021-08-27)
- [Fixed scalar divide-by-zero panics.][200]
- [Implemented `DoubleEndedIterator` for `U32Digits` and `U64Digits`.][208]
- [Optimized multiplication to avoid unnecessary allocations.][199]
- [Optimized string formatting for very large values.][216]
**Contributors**: @cuviper, @PatrickNorton
[199]: https://github.com/rust-num/num-bigint/pull/199
[200]: https://github.com/rust-num/num-bigint/pull/200
[208]: https://github.com/rust-num/num-bigint/pull/208
[216]: https://github.com/rust-num/num-bigint/pull/216
# Release 0.4.0 (2021-03-05)
### Breaking Changes
- Updated public dependencies on [arbitrary, quickcheck][194], and [rand][185]:
- `arbitrary` support has been updated to 1.0, requiring Rust 1.40.
- `quickcheck` support has been updated to 1.0, requiring Rust 1.46.
- `rand` support has been updated to 0.8, requiring Rust 1.36.
- [`Debug` now shows plain numeric values for `BigInt` and `BigUint`][195],
rather than the raw list of internal digits.
**Contributors**: @cuviper, @Gelbpunkt
[185]: https://github.com/rust-num/num-bigint/pull/185
[194]: https://github.com/rust-num/num-bigint/pull/194
[195]: https://github.com/rust-num/num-bigint/pull/195
# Release 0.3.3 (2021-09-03)
- [Use explicit `Integer::div_ceil` to avoid the new unstable method.][219]
**Contributors**: @catenacyber, @cuviper
# Release 0.3.2 (2021-03-04)
- [The new `BigUint` methods `count_ones` and `trailing_ones`][175] return the
number of `1` bits in the entire value or just its least-significant tail,
respectively.
- [The new `BigInt` and `BigUint` methods `bit` and `set_bit`][183] will read
and write individual bits of the value. For negative `BigInt`, bits are
determined as if they were in the two's complement representation.
- [The `from_radix_le` and `from_radix_be` methods][187] now accept empty
buffers to represent zero.
- [`BigInt` and `BigUint` can now iterate digits as `u32` or `u64`][192],
regardless of the actual internal digit size.
**Contributors**: @BartMassey, @cuviper, @janmarthedal, @sebastianv89, @Speedy37
[175]: https://github.com/rust-num/num-bigint/pull/175
[183]: https://github.com/rust-num/num-bigint/pull/183
[187]: https://github.com/rust-num/num-bigint/pull/187
[192]: https://github.com/rust-num/num-bigint/pull/192
# Release 0.3.1 (2020-11-03)
- [Addition and subtraction now uses intrinsics][141] for performance on `x86`
and `x86_64` when built with Rust 1.33 or later.
- [Conversions `to_f32` and `to_f64` now return infinity][163] for very large
numbers, rather than `None`. This does preserve the sign too, so a large
negative `BigInt` will convert to negative infinity.
- [The optional `arbitrary` feature implements `arbitrary::Arbitrary`][166],
distinct from `quickcheck::Arbitrary`.
- [The division algorithm has been optimized][170] to reduce the number of
temporary allocations and improve the internal guesses at each step.
- [`BigInt` and `BigUint` will opportunistically shrink capacity][171] if the
internal vector is much larger than needed.
**Contributors**: @cuviper, @e00E, @ejmahler, @notoria, @tczajka
[141]: https://github.com/rust-num/num-bigint/pull/141
[163]: https://github.com/rust-num/num-bigint/pull/163
[166]: https://github.com/rust-num/num-bigint/pull/166
[170]: https://github.com/rust-num/num-bigint/pull/170
[171]: https://github.com/rust-num/num-bigint/pull/171
# Release 0.3.0 (2020-06-12)
### Enhancements
- [The internal `BigDigit` may now be either `u32` or `u64`][62], although that
implementation detail is not exposed in the API. For now, this is chosen to
match the target pointer size, but may change in the future.
- [No-`std` is now supported with the `alloc` crate on Rust 1.36][101].
- [`Pow` is now implemented for bigint values][137], not just references.
- [`TryFrom` is now implemented on Rust 1.34 and later][123], converting signed
integers to unsigned, and narrowing big integers to primitives.
- [`Shl` and `Shr` are now implemented for a variety of shift types][142].
- A new `trailing_zeros()` returns the number of consecutive zeros from the
least significant bit.
- The new `BigInt::magnitude` and `into_parts` methods give access to its
`BigUint` part as the magnitude.
### Breaking Changes
- `num-bigint` now requires Rust 1.31 or greater.
- The "i128" opt-in feature was removed, now always available.
- [Updated public dependencies][110]:
- `rand` support has been updated to 0.7, requiring Rust 1.32.
- `quickcheck` support has been updated to 0.9, requiring Rust 1.34.
- [Removed `impl Neg for BigUint`][145], which only ever panicked.
- [Bit counts are now `u64` instead of `usize`][143].
**Contributors**: @cuviper, @dignifiedquire, @hansihe,
@kpcyrd, @milesand, @tech6hutch
[62]: https://github.com/rust-num/num-bigint/pull/62
[101]: https://github.com/rust-num/num-bigint/pull/101
[110]: https://github.com/rust-num/num-bigint/pull/110
[123]: https://github.com/rust-num/num-bigint/pull/123
[137]: https://github.com/rust-num/num-bigint/pull/137
[142]: https://github.com/rust-num/num-bigint/pull/142
[143]: https://github.com/rust-num/num-bigint/pull/143
[145]: https://github.com/rust-num/num-bigint/pull/145
# Release 0.2.6 (2020-01-27)
- [Fix the promotion of negative `isize` in `BigInt` assign-ops][133].
**Contributors**: @cuviper, @HactarCE
[133]: https://github.com/rust-num/num-bigint/pull/133
# Release 0.2.5 (2020-01-09)
- [Updated the `autocfg` build dependency to 1.0][126].
**Contributors**: @cuviper, @tspiteri
[126]: https://github.com/rust-num/num-bigint/pull/126
# Release 0.2.4 (2020-01-01)
- [The new `BigUint::to_u32_digits` method][104] returns the number as a
little-endian vector of base-2<sup>32</sup> digits. The same method on
`BigInt` also returns the sign.
- [`BigUint::modpow` now applies a modulus even for exponent 1][113], which
also affects `BigInt::modpow`.
- [`BigInt::modpow` now returns the correct sign for negative bases with even
exponents][114].
[104]: https://github.com/rust-num/num-bigint/pull/104
[113]: https://github.com/rust-num/num-bigint/pull/113
[114]: https://github.com/rust-num/num-bigint/pull/114
**Contributors**: @alex-ozdemir, @cuviper, @dingelish, @Speedy37, @youknowone
# Release 0.2.3 (2019-09-03)
- [`Pow` is now implemented for `BigUint` exponents][77].
- [The optional `quickcheck` feature enables implementations of `Arbitrary`][99].
- See the [full comparison][compare-0.2.3] for performance enhancements and more!
[77]: https://github.com/rust-num/num-bigint/pull/77
[99]: https://github.com/rust-num/num-bigint/pull/99
[compare-0.2.3]: https://github.com/rust-num/num-bigint/compare/num-bigint-0.2.2...num-bigint-0.2.3
**Contributors**: @cuviper, @lcnr, @maxbla, @mikelodder7, @mikong,
@TheLetterTheta, @tspiteri, @XAMPPRocky, @youknowone
# Release 0.2.2 (2018-12-14)
- [The `Roots` implementations now use better initial guesses][71].
- [Fixed `to_signed_bytes_*` for some positive numbers][72], where the
most-significant byte is `0x80` and the rest are `0`.
[71]: https://github.com/rust-num/num-bigint/pull/71
[72]: https://github.com/rust-num/num-bigint/pull/72
**Contributors**: @cuviper, @leodasvacas
# Release 0.2.1 (2018-11-02)
- [`RandBigInt` now uses `Rng::fill_bytes`][53] to improve performance, instead
of repeated `gen::<u32>` calls. This also affects the implementations of the
other `rand` traits. This may potentially change the values produced by some
seeded RNGs on previous versions, but the values were tested to be stable
with `ChaChaRng`, `IsaacRng`, and `XorShiftRng`.
- [`BigInt` and `BigUint` now implement `num_integer::Roots`][56].
- [`BigInt` and `BigUint` now implement `num_traits::Pow`][54].
- [`BigInt` and `BigUint` now implement operators with 128-bit integers][64].
**Contributors**: @cuviper, @dignifiedquire, @mancabizjak, @Robbepop,
@TheIronBorn, @thomwiggers
[53]: https://github.com/rust-num/num-bigint/pull/53
[54]: https://github.com/rust-num/num-bigint/pull/54
[56]: https://github.com/rust-num/num-bigint/pull/56
[64]: https://github.com/rust-num/num-bigint/pull/64
# Release 0.2.0 (2018-05-25)
### Enhancements
- [`BigInt` and `BigUint` now implement `Product` and `Sum`][22] for iterators
of any item that we can `Mul` and `Add`, respectively. For example, a
factorial can now be simply: `let f: BigUint = (1u32..1000).product();`
- [`BigInt` now supports two's-complement logic operations][26], namely
`BitAnd`, `BitOr`, `BitXor`, and `Not`. These act conceptually as if each
number had an infinite prefix of `0` or `1` bits for positive or negative.
- [`BigInt` now supports assignment operators][41] like `AddAssign`.
- [`BigInt` and `BigUint` now support conversions with `i128` and `u128`][44],
if sufficient compiler support is detected.
- [`BigInt` and `BigUint` now implement rand's `SampleUniform` trait][48], and
[a custom `RandomBits` distribution samples by bit size][49].
- The release also includes other miscellaneous improvements to performance.
### Breaking Changes
- [`num-bigint` now requires rustc 1.15 or greater][23].
- [The crate now has a `std` feature, and won't build without it][46]. This is
in preparation for someday supporting `#![no_std]` with `alloc`.
- [The `serde` dependency has been updated to 1.0][24], still disabled by
default. The `rustc-serialize` crate is no longer supported by `num-bigint`.
- [The `rand` dependency has been updated to 0.5][48], now disabled by default.
This requires rustc 1.22 or greater for `rand`'s own requirement.
- [`Shr for BigInt` now rounds down][8] rather than toward zero, matching the
behavior of the primitive integers for negative values.
- [`ParseBigIntError` is now an opaque type][37].
- [The `big_digit` module is no longer public][38], nor are the `BigDigit` and
`DoubleBigDigit` types and `ZERO_BIG_DIGIT` constant that were re-exported in
the crate root. Public APIs which deal in digits, like `BigUint::from_slice`,
will now always be base-`u32`.
**Contributors**: @clarcharr, @cuviper, @dodomorandi, @tiehuis, @tspiteri
[8]: https://github.com/rust-num/num-bigint/pull/8
[22]: https://github.com/rust-num/num-bigint/pull/22
[23]: https://github.com/rust-num/num-bigint/pull/23
[24]: https://github.com/rust-num/num-bigint/pull/24
[26]: https://github.com/rust-num/num-bigint/pull/26
[37]: https://github.com/rust-num/num-bigint/pull/37
[38]: https://github.com/rust-num/num-bigint/pull/38
[41]: https://github.com/rust-num/num-bigint/pull/41
[44]: https://github.com/rust-num/num-bigint/pull/44
[46]: https://github.com/rust-num/num-bigint/pull/46
[48]: https://github.com/rust-num/num-bigint/pull/48
[49]: https://github.com/rust-num/num-bigint/pull/49
# Release 0.1.44 (2018-05-14)
- [Division with single-digit divisors is now much faster.][42]
- The README now compares [`ramp`, `rug`, `rust-gmp`][20], and [`apint`][21].
**Contributors**: @cuviper, @Robbepop
[20]: https://github.com/rust-num/num-bigint/pull/20
[21]: https://github.com/rust-num/num-bigint/pull/21
[42]: https://github.com/rust-num/num-bigint/pull/42
# Release 0.1.43 (2018-02-08)
- [The new `BigInt::modpow`][18] performs signed modular exponentiation, using
the existing `BigUint::modpow` and rounding negatives similar to `mod_floor`.
**Contributors**: @cuviper
[18]: https://github.com/rust-num/num-bigint/pull/18
# Release 0.1.42 (2018-02-07)
- [num-bigint now has its own source repository][num-356] at [rust-num/num-bigint][home].
- [`lcm` now avoids creating a large intermediate product][num-350].
- [`gcd` now uses Stein's algorithm][15] with faster shifts instead of division.
- [`rand` support is now extended to 0.4][11] (while still allowing 0.3).
**Contributors**: @cuviper, @Emerentius, @ignatenkobrain, @mhogrefe
[home]: https://github.com/rust-num/num-bigint
[num-350]: https://github.com/rust-num/num/pull/350
[num-356]: https://github.com/rust-num/num/pull/356
[11]: https://github.com/rust-num/num-bigint/pull/11
[15]: https://github.com/rust-num/num-bigint/pull/15
# Prior releases
No prior release notes were kept. Thanks all the same to the many
contributors who have made this crate what it is!

450
vendor/num-bigint/benches/bigint.rs vendored Normal file
View File

@@ -0,0 +1,450 @@
#![feature(test)]
#![cfg(feature = "rand")]
extern crate test;
use num_bigint::{BigInt, BigUint, RandBigInt};
use num_traits::{FromPrimitive, Num, One, Zero};
use std::mem::replace;
use test::Bencher;
mod rng;
use rng::get_rng;
/// Benchmark multiplying two random signed big integers of `xbits` and
/// `ybits` bits, generated once outside the timed loop.
fn multiply_bench(b: &mut Bencher, xbits: u64, ybits: u64) {
    let mut prng = get_rng();
    let lhs = prng.gen_bigint(xbits);
    let rhs = prng.gen_bigint(ybits);
    b.iter(|| &lhs * &rhs);
}

/// Benchmark dividing a random `xbits`-bit integer by a random
/// `ybits`-bit integer.
fn divide_bench(b: &mut Bencher, xbits: u64, ybits: u64) {
    let mut prng = get_rng();
    let dividend = prng.gen_bigint(xbits);
    let divisor = prng.gen_bigint(ybits);
    b.iter(|| &dividend / &divisor);
}

/// Benchmark the remainder of a random `xbits`-bit integer modulo a
/// random `ybits`-bit integer.
fn remainder_bench(b: &mut Bencher, xbits: u64, ybits: u64) {
    let mut prng = get_rng();
    let dividend = prng.gen_bigint(xbits);
    let divisor = prng.gen_bigint(ybits);
    b.iter(|| &dividend % &divisor);
}
/// Compute `n!` as a `BigUint` by accumulating the product `1 * 2 * ... * n`.
fn factorial(n: usize) -> BigUint {
    let mut acc: BigUint = One::one();
    for k in 1..=n {
        // Convert the loop index and fold it into the running product.
        let factor: BigUint = FromPrimitive::from_usize(k).unwrap();
        acc *= factor;
    }
    acc
}
/// Compute Fibonacci numbers
///
/// Iteratively advances the pair `(f0, f1) = (F(k), F(k+1))`; after `n`
/// steps `f0` holds `F(n)`.
fn fib(n: usize) -> BigUint {
    let mut f0: BigUint = Zero::zero();
    let mut f1: BigUint = One::one();
    for _ in 0..n {
        // f2 = f0 + f1, then slide the window: (f0, f1) <- (f1, f2).
        let f2 = f0 + &f1;
        f0 = replace(&mut f1, f2);
    }
    f0
}

/// Compute Fibonacci numbers with two ops per iteration
/// (add and subtract, like issue #200)
fn fib2(n: usize) -> BigUint {
    let mut f0: BigUint = Zero::zero();
    let mut f1: BigUint = One::one();
    for _ in 0..n {
        // (f0, f1) <- (f1, f0 + f1), using one in-place add and one subtract
        // so each iteration exercises both operators.
        f1 += &f0;
        f0 = &f1 - f0;
    }
    f0
}
// Multiplication benchmarks at various operand bit sizes (powers of two).
#[bench]
fn multiply_0(b: &mut Bencher) {
    multiply_bench(b, 1 << 8, 1 << 8);
}
#[bench]
fn multiply_1(b: &mut Bencher) {
    multiply_bench(b, 1 << 8, 1 << 16);
}
#[bench]
fn multiply_2(b: &mut Bencher) {
    multiply_bench(b, 1 << 16, 1 << 16);
}
#[bench]
fn multiply_3(b: &mut Bencher) {
    multiply_bench(b, 1 << 16, 1 << 17);
}
#[bench]
fn multiply_4(b: &mut Bencher) {
    multiply_bench(b, 1 << 12, 1 << 13);
}
#[bench]
fn multiply_5(b: &mut Bencher) {
    multiply_bench(b, 1 << 12, 1 << 14);
}

// Division benchmarks; the dividend is always the larger operand.
#[bench]
fn divide_0(b: &mut Bencher) {
    divide_bench(b, 1 << 8, 1 << 6);
}
#[bench]
fn divide_1(b: &mut Bencher) {
    divide_bench(b, 1 << 12, 1 << 8);
}
#[bench]
fn divide_2(b: &mut Bencher) {
    divide_bench(b, 1 << 16, 1 << 12);
}
#[bench]
fn divide_big_little(b: &mut Bencher) {
    divide_bench(b, 1 << 16, 1 << 4);
}

// Remainder benchmarks mirroring the division cases above.
#[bench]
fn remainder_0(b: &mut Bencher) {
    remainder_bench(b, 1 << 8, 1 << 6);
}
#[bench]
fn remainder_1(b: &mut Bencher) {
    remainder_bench(b, 1 << 12, 1 << 8);
}
#[bench]
fn remainder_2(b: &mut Bencher) {
    remainder_bench(b, 1 << 16, 1 << 12);
}
#[bench]
fn remainder_big_little(b: &mut Bencher) {
    remainder_bench(b, 1 << 16, 1 << 4);
}
// Benchmarks of the factorial and Fibonacci helpers defined above, plus
// decimal formatting of their results via `to_string`.
#[bench]
fn factorial_100(b: &mut Bencher) {
    b.iter(|| factorial(100));
}
#[bench]
fn fib_100(b: &mut Bencher) {
    b.iter(|| fib(100));
}
#[bench]
fn fib_1000(b: &mut Bencher) {
    b.iter(|| fib(1000));
}
#[bench]
fn fib_10000(b: &mut Bencher) {
    b.iter(|| fib(10000));
}
#[bench]
fn fib2_100(b: &mut Bencher) {
    b.iter(|| fib2(100));
}
#[bench]
fn fib2_1000(b: &mut Bencher) {
    b.iter(|| fib2(1000));
}
#[bench]
fn fib2_10000(b: &mut Bencher) {
    b.iter(|| fib2(10000));
}
#[bench]
fn fac_to_string(b: &mut Bencher) {
    // The value is computed once; only the formatting is timed.
    let fac = factorial(100);
    b.iter(|| fac.to_string());
}
#[bench]
fn fib_to_string(b: &mut Bencher) {
    let fib = fib(100);
    b.iter(|| fib.to_string());
}
/// Benchmark formatting a random `bits`-bit integer in the given radix.
fn to_str_radix_bench(b: &mut Bencher, radix: u32, bits: u64) {
    let mut rng = get_rng();
    let x = rng.gen_bigint(bits);
    b.iter(|| x.to_str_radix(radix));
}

// Formatting benchmarks: power-of-two radices (2, 8, 16) can convert
// digit-by-digit, while 10 and 36 require repeated division.
#[bench]
fn to_str_radix_02(b: &mut Bencher) {
    to_str_radix_bench(b, 2, 1009);
}
#[bench]
fn to_str_radix_08(b: &mut Bencher) {
    to_str_radix_bench(b, 8, 1009);
}
#[bench]
fn to_str_radix_10(b: &mut Bencher) {
    to_str_radix_bench(b, 10, 1009);
}
#[bench]
fn to_str_radix_10_2(b: &mut Bencher) {
    // Same radix as above, with a ~10x larger value.
    to_str_radix_bench(b, 10, 10009);
}
#[bench]
fn to_str_radix_16(b: &mut Bencher) {
    to_str_radix_bench(b, 16, 1009);
}
#[bench]
fn to_str_radix_36(b: &mut Bencher) {
    to_str_radix_bench(b, 36, 1009);
}
/// Benchmark parsing a 1009-bit integer from its string form in `radix`.
/// The round-trip is asserted once up front so the benchmark only times
/// correct parses.
fn from_str_radix_bench(b: &mut Bencher, radix: u32) {
    let mut rng = get_rng();
    let x = rng.gen_bigint(1009);
    let s = x.to_str_radix(radix);
    assert_eq!(x, BigInt::from_str_radix(&s, radix).unwrap());
    b.iter(|| BigInt::from_str_radix(&s, radix));
}

// Parsing benchmarks across the same set of radices as formatting.
#[bench]
fn from_str_radix_02(b: &mut Bencher) {
    from_str_radix_bench(b, 2);
}
#[bench]
fn from_str_radix_08(b: &mut Bencher) {
    from_str_radix_bench(b, 8);
}
#[bench]
fn from_str_radix_10(b: &mut Bencher) {
    from_str_radix_bench(b, 10);
}
#[bench]
fn from_str_radix_16(b: &mut Bencher) {
    from_str_radix_bench(b, 16);
}
#[bench]
fn from_str_radix_36(b: &mut Bencher) {
    from_str_radix_bench(b, 36);
}
/// Benchmark generating a random signed integer of the given bit size.
fn rand_bench(b: &mut Bencher, bits: u64) {
    let mut rng = get_rng();
    b.iter(|| rng.gen_bigint(bits));
}

// Random-generation benchmarks from 64 bits up to 128Ki bits; 1009 is a
// deliberately non-power-of-two, non-digit-aligned size.
#[bench]
fn rand_64(b: &mut Bencher) {
    rand_bench(b, 1 << 6);
}
#[bench]
fn rand_256(b: &mut Bencher) {
    rand_bench(b, 1 << 8);
}
#[bench]
fn rand_1009(b: &mut Bencher) {
    rand_bench(b, 1009);
}
#[bench]
fn rand_2048(b: &mut Bencher) {
    rand_bench(b, 1 << 11);
}
#[bench]
fn rand_4096(b: &mut Bencher) {
    rand_bench(b, 1 << 12);
}
#[bench]
fn rand_8192(b: &mut Bencher) {
    rand_bench(b, 1 << 13);
}
#[bench]
fn rand_65536(b: &mut Bencher) {
    rand_bench(b, 1 << 16);
}
#[bench]
fn rand_131072(b: &mut Bencher) {
    rand_bench(b, 1 << 17);
}
#[bench]
fn shl(b: &mut Bencher) {
    // Benchmark 50 in-place left shifts (by 0..50 bits) on a ~1000-bit value,
    // restoring the starting value each iteration via `clone_from`.
    let n = BigUint::one() << 1000u32;
    let mut m = n.clone();
    b.iter(|| {
        m.clone_from(&n);
        for i in 0..50 {
            m <<= i;
        }
    })
}

#[bench]
fn shr(b: &mut Bencher) {
    // Same as `shl` but shifting right, starting from a larger (~2000-bit)
    // value so the shifts do not immediately hit zero.
    let n = BigUint::one() << 2000u32;
    let mut m = n.clone();
    b.iter(|| {
        m.clone_from(&n);
        for i in 0..50 {
            m >>= i;
        }
    })
}

#[bench]
fn hash(b: &mut Bencher) {
    // Benchmark hashing 1000 random BigInts (1000..2000 bits) by collecting
    // them into a HashSet; the assert guards against accidental collisions
    // being optimized away.
    use std::collections::HashSet;
    let mut rng = get_rng();
    let v: Vec<BigInt> = (1000..2000).map(|bits| rng.gen_bigint(bits)).collect();
    b.iter(|| {
        let h: HashSet<&BigInt> = v.iter().collect();
        assert_eq!(h.len(), v.len());
    });
}
#[bench]
fn pow_bench(b: &mut Bencher) {
    // Raise every base in 2..=100 to every exponent in 2..=100 using the
    // primitive-exponent `pow`.
    b.iter(|| {
        let upper = 100_u32;
        let mut i_big = BigUint::from(1u32);
        for _i in 2..=upper {
            i_big += 1u32;
            for j in 2..=upper {
                i_big.pow(j);
            }
        }
    });
}

#[bench]
fn pow_bench_bigexp(b: &mut Bencher) {
    // Same grid as `pow_bench`, but with the exponent itself a BigUint,
    // exercising the `Pow<&BigUint>` implementation.
    use num_traits::Pow;
    b.iter(|| {
        let upper = 100_u32;
        let mut i_big = BigUint::from(1u32);
        for _i in 2..=upper {
            i_big += 1u32;
            let mut j_big = BigUint::from(1u32);
            for _j in 2..=upper {
                j_big += 1u32;
                Pow::pow(&i_big, &j_big);
            }
        }
    });
}

// Single large powers of ten at increasing magnitudes.
#[bench]
fn pow_bench_1e1000(b: &mut Bencher) {
    b.iter(|| BigUint::from(10u32).pow(1_000));
}
#[bench]
fn pow_bench_1e10000(b: &mut Bencher) {
    b.iter(|| BigUint::from(10u32).pow(10_000));
}
#[bench]
fn pow_bench_1e100000(b: &mut Bencher) {
    b.iter(|| BigUint::from(10u32).pow(100_000));
}
/// This modulus is the prime from the 2048-bit MODP DH group:
/// https://tools.ietf.org/html/rfc3526#section-3
///
/// Hex digits, grouped with `_` separators for readability; parsed below
/// with `from_str_radix(…, 16)`.
const RFC3526_2048BIT_MODP_GROUP: &str = "\
    FFFFFFFF_FFFFFFFF_C90FDAA2_2168C234_C4C6628B_80DC1CD1\
    29024E08_8A67CC74_020BBEA6_3B139B22_514A0879_8E3404DD\
    EF9519B3_CD3A431B_302B0A6D_F25F1437_4FE1356D_6D51C245\
    E485B576_625E7EC6_F44C42E9_A637ED6B_0BFF5CB6_F406B7ED\
    EE386BFB_5A899FA5_AE9F2411_7C4B1FE6_49286651_ECE45B3D\
    C2007CB8_A163BF05_98DA4836_1C55D39A_69163FA8_FD24CF5F\
    83655D23_DCA3AD96_1C62F356_208552BB_9ED52907_7096966D\
    670C354E_4ABC9804_F1746C08_CA18217C_32905E46_2E36CE3B\
    E39E772C_180E8603_9B2783A2_EC07A28F_B5C55DF0_6F4C52C9\
    DE2BCBF6_95581718_3995497C_EA956AE5_15D22618_98FA0510\
    15728E5A_8AACAA68_FFFFFFFF_FFFFFFFF";

#[bench]
fn modpow(b: &mut Bencher) {
    // Modular exponentiation with an odd (prime) modulus.
    let mut rng = get_rng();
    let base = rng.gen_biguint(2048);
    let e = rng.gen_biguint(2048);
    let m = BigUint::from_str_radix(RFC3526_2048BIT_MODP_GROUP, 16).unwrap();
    b.iter(|| base.modpow(&e, &m));
}

#[bench]
fn modpow_even(b: &mut Bencher) {
    let mut rng = get_rng();
    let base = rng.gen_biguint(2048);
    let e = rng.gen_biguint(2048);
    // Make the modulus even, so monty (base-2^32) doesn't apply.
    let m = BigUint::from_str_radix(RFC3526_2048BIT_MODP_GROUP, 16).unwrap() - 1u32;
    b.iter(|| base.modpow(&e, &m));
}
// Compare materializing the digit vectors (`to_*_digits`) against lazily
// iterating them (`iter_*_digits`); `.max()` forces full consumption of
// the iterator without allocating.
#[bench]
fn to_u32_digits(b: &mut Bencher) {
    let mut rng = get_rng();
    let n = rng.gen_biguint(2048);
    b.iter(|| n.to_u32_digits());
}
#[bench]
fn iter_u32_digits(b: &mut Bencher) {
    let mut rng = get_rng();
    let n = rng.gen_biguint(2048);
    b.iter(|| n.iter_u32_digits().max());
}
#[bench]
fn to_u64_digits(b: &mut Bencher) {
    let mut rng = get_rng();
    let n = rng.gen_biguint(2048);
    b.iter(|| n.to_u64_digits());
}
#[bench]
fn iter_u64_digits(b: &mut Bencher) {
    let mut rng = get_rng();
    let n = rng.gen_biguint(2048);
    b.iter(|| n.iter_u64_digits().max());
}

42
vendor/num-bigint/benches/factorial.rs vendored Normal file
View File

@@ -0,0 +1,42 @@
#![feature(test)]
extern crate test;
use num_bigint::BigUint;
use num_traits::One;
use std::ops::{Div, Mul};
use test::Bencher;
// 999! computed by folding with multiplication, first promoting every
// factor to BigUint, then using the mixed BigUint*u32 operator.
#[bench]
fn factorial_mul_biguint(b: &mut Bencher) {
    b.iter(|| {
        (1u32..1000)
            .map(BigUint::from)
            .fold(BigUint::one(), Mul::mul)
    });
}
#[bench]
fn factorial_mul_u32(b: &mut Bencher) {
    b.iter(|| (1u32..1000).fold(BigUint::one(), Mul::mul));
}

// The division test is inspired by this blog comparison:
// <https://tiehuis.github.io/big-integers-in-zig#division-test-single-limb>
#[bench]
fn factorial_div_biguint(b: &mut Bencher) {
    // Divide 999! back down by each factor in reverse; the factorial is
    // computed once outside the timed loop.
    let n: BigUint = (1u32..1000).fold(BigUint::one(), Mul::mul);
    b.iter(|| {
        (1u32..1000)
            .rev()
            .map(BigUint::from)
            .fold(n.clone(), Div::div)
    });
}
#[bench]
fn factorial_div_u32(b: &mut Bencher) {
    let n: BigUint = (1u32..1000).fold(BigUint::one(), Mul::mul);
    b.iter(|| (1u32..1000).rev().fold(n.clone(), Div::div));
}

76
vendor/num-bigint/benches/gcd.rs vendored Normal file
View File

@@ -0,0 +1,76 @@
#![feature(test)]
#![cfg(feature = "rand")]
extern crate test;
use num_bigint::{BigUint, RandBigInt};
use num_integer::Integer;
use num_traits::Zero;
use test::Bencher;
mod rng;
use rng::get_rng;
/// Benchmark `gcd` on two random `bits`-bit values, first cross-checking
/// its result against the reference `euclid` implementation below.
fn bench(b: &mut Bencher, bits: u64, gcd: fn(&BigUint, &BigUint) -> BigUint) {
    let mut rng = get_rng();
    let x = rng.gen_biguint(bits);
    let y = rng.gen_biguint(bits);
    assert_eq!(euclid(&x, &y), x.gcd(&y));
    b.iter(|| gcd(&x, &y));
}
/// Reference GCD via the classic Euclidean algorithm (repeated remainders),
/// used both as a benchmark subject and to validate `BigUint::gcd`.
fn euclid(x: &BigUint, y: &BigUint) -> BigUint {
    // Use Euclid's algorithm
    let mut a = x.clone();
    let mut b = y.clone();
    while !a.is_zero() {
        // (a, b) <- (b mod a, a)
        let prev = a;
        a = b % &prev;
        b = prev;
    }
    b
}
// Euclid (division-based) GCD at several operand sizes.
#[bench]
fn gcd_euclid_0064(b: &mut Bencher) {
    bench(b, 64, euclid);
}
#[bench]
fn gcd_euclid_0256(b: &mut Bencher) {
    bench(b, 256, euclid);
}
#[bench]
fn gcd_euclid_1024(b: &mut Bencher) {
    bench(b, 1024, euclid);
}
#[bench]
fn gcd_euclid_4096(b: &mut Bencher) {
    bench(b, 4096, euclid);
}

// Integer for BigUint now uses Stein for gcd
#[bench]
fn gcd_stein_0064(b: &mut Bencher) {
    bench(b, 64, BigUint::gcd);
}
#[bench]
fn gcd_stein_0256(b: &mut Bencher) {
    bench(b, 256, BigUint::gcd);
}
#[bench]
fn gcd_stein_1024(b: &mut Bencher) {
    bench(b, 1024, BigUint::gcd);
}
#[bench]
fn gcd_stein_4096(b: &mut Bencher) {
    bench(b, 4096, BigUint::gcd);
}

38
vendor/num-bigint/benches/rng/mod.rs vendored Normal file
View File

@@ -0,0 +1,38 @@
use rand::RngCore;
/// Return the deterministic RNG used by all benchmarks, seeded with a
/// fixed constant so runs are reproducible.
pub(crate) fn get_rng() -> impl RngCore {
    XorShiftStar {
        a: 0x0123_4567_89AB_CDEF,
    }
}

/// Simple `Rng` for benchmarking without additional dependencies
struct XorShiftStar {
    // Internal 64-bit state; must never be zero (the seed above is not).
    a: u64,
}

impl RngCore for XorShiftStar {
    fn next_u32(&mut self) -> u32 {
        // Truncate a full 64-bit output to its low 32 bits.
        self.next_u64() as u32
    }

    fn next_u64(&mut self) -> u64 {
        // https://en.wikipedia.org/wiki/Xorshift#xorshift*
        self.a ^= self.a >> 12;
        self.a ^= self.a << 25;
        self.a ^= self.a >> 27;
        self.a.wrapping_mul(0x2545_F491_4F6C_DD1D)
    }

    fn fill_bytes(&mut self, dest: &mut [u8]) {
        // Emit 8 little-endian bytes per state update; the final chunk may
        // take fewer than 8 bytes.
        for chunk in dest.chunks_mut(8) {
            let bytes = self.next_u64().to_le_bytes();
            let slice = &bytes[..chunk.len()];
            chunk.copy_from_slice(slice)
        }
    }

    fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), rand::Error> {
        // This generator cannot fail, so the fallible variant always succeeds.
        Ok(self.fill_bytes(dest))
    }
}

166
vendor/num-bigint/benches/roots.rs vendored Normal file
View File

@@ -0,0 +1,166 @@
#![feature(test)]
#![cfg(feature = "rand")]
extern crate test;
use num_bigint::{BigUint, RandBigInt};
use test::Bencher;
mod rng;
use rng::get_rng;
// The `big64` cases demonstrate the speed of cases where the value
// can be converted to a `u64` primitive for faster calculation.
//
// The `big1k` cases demonstrate those that can convert to `f64` for
// a better initial guess of the actual value.
//
// The `big2k` and `big4k` cases are too big for `f64`, and use a simpler guess.
/// Sanity-check `nth_root(n)` for `x` before benchmarking it: the result
/// must agree with `sqrt`/`cbrt` where applicable, and must satisfy the
/// bracketing `root^n <= x < (root+1)^n`, including at the boundary
/// values just below each bracket edge.
fn check(x: &BigUint, n: u32) {
    let root = x.nth_root(n);
    if n == 2 {
        assert_eq!(root, x.sqrt())
    } else if n == 3 {
        assert_eq!(root, x.cbrt())
    }
    // `lo = root^n` is the largest n-th power not exceeding x.
    let lo = root.pow(n);
    assert!(lo <= *x);
    assert_eq!(lo.nth_root(n), root);
    assert_eq!((&lo - 1u32).nth_root(n), &root - 1u32);
    // `hi = (root+1)^n` is the smallest n-th power exceeding x.
    let hi = (&root + 1u32).pow(n);
    assert!(hi > *x);
    assert_eq!(hi.nth_root(n), &root + 1u32);
    assert_eq!((&hi - 1u32).nth_root(n), root);
}
/// Benchmark `sqrt` on a random `bits`-bit value; the value is printed to
/// stderr so a failing `check` can be reproduced.
fn bench_sqrt(b: &mut Bencher, bits: u64) {
    let x = get_rng().gen_biguint(bits);
    eprintln!("bench_sqrt({})", x);
    check(&x, 2);
    b.iter(|| x.sqrt());
}

#[bench]
fn big64_sqrt(b: &mut Bencher) {
    bench_sqrt(b, 64);
}
#[bench]
fn big1k_sqrt(b: &mut Bencher) {
    bench_sqrt(b, 1024);
}
#[bench]
fn big2k_sqrt(b: &mut Bencher) {
    bench_sqrt(b, 2048);
}
#[bench]
fn big4k_sqrt(b: &mut Bencher) {
    bench_sqrt(b, 4096);
}
/// Benchmark `cbrt` on a random `bits`-bit value, validated via `check`.
fn bench_cbrt(b: &mut Bencher, bits: u64) {
    let x = get_rng().gen_biguint(bits);
    eprintln!("bench_cbrt({})", x);
    check(&x, 3);
    b.iter(|| x.cbrt());
}

#[bench]
fn big64_cbrt(b: &mut Bencher) {
    bench_cbrt(b, 64);
}
#[bench]
fn big1k_cbrt(b: &mut Bencher) {
    bench_cbrt(b, 1024);
}
#[bench]
fn big2k_cbrt(b: &mut Bencher) {
    bench_cbrt(b, 2048);
}
#[bench]
fn big4k_cbrt(b: &mut Bencher) {
    bench_cbrt(b, 4096);
}
/// Benchmark `nth_root(n)` on a random `bits`-bit value, validated via
/// `check`.
fn bench_nth_root(b: &mut Bencher, bits: u64, n: u32) {
    let x = get_rng().gen_biguint(bits);
    eprintln!("bench_{}th_root({})", n, x);
    check(&x, n);
    b.iter(|| x.nth_root(n));
}

// nth-root benchmarks across value sizes (64 bits .. 4096 bits) and root
// degrees (10 .. 10000).
#[bench]
fn big64_nth_10(b: &mut Bencher) {
    bench_nth_root(b, 64, 10);
}
#[bench]
fn big1k_nth_10(b: &mut Bencher) {
    bench_nth_root(b, 1024, 10);
}
#[bench]
fn big1k_nth_100(b: &mut Bencher) {
    bench_nth_root(b, 1024, 100);
}
#[bench]
fn big1k_nth_1000(b: &mut Bencher) {
    bench_nth_root(b, 1024, 1000);
}
#[bench]
fn big1k_nth_10000(b: &mut Bencher) {
    bench_nth_root(b, 1024, 10000);
}
#[bench]
fn big2k_nth_10(b: &mut Bencher) {
    bench_nth_root(b, 2048, 10);
}
#[bench]
fn big2k_nth_100(b: &mut Bencher) {
    bench_nth_root(b, 2048, 100);
}
#[bench]
fn big2k_nth_1000(b: &mut Bencher) {
    bench_nth_root(b, 2048, 1000);
}
#[bench]
fn big2k_nth_10000(b: &mut Bencher) {
    bench_nth_root(b, 2048, 10000);
}
#[bench]
fn big4k_nth_10(b: &mut Bencher) {
    bench_nth_root(b, 4096, 10);
}
#[bench]
fn big4k_nth_100(b: &mut Bencher) {
    bench_nth_root(b, 4096, 100);
}
#[bench]
fn big4k_nth_1000(b: &mut Bencher) {
    bench_nth_root(b, 4096, 1000);
}
#[bench]
fn big4k_nth_10000(b: &mut Bencher) {
    bench_nth_root(b, 4096, 10000);
}

View File

@@ -0,0 +1,138 @@
// The Computer Language Benchmarks Game
// http://benchmarksgame.alioth.debian.org/
//
// contributed by the Rust Project Developers
// Copyright (c) 2013-2014 The Rust Project Developers
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// - Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// - Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in
// the documentation and/or other materials provided with the
// distribution.
//
// - Neither the name of "The Computer Language Benchmarks Game" nor
// the name of "The Computer Language Shootout Benchmarks" nor the
// names of its contributors may be used to endorse or promote
// products derived from this software without specific prior
// written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
use std::io;
use std::str::FromStr;
use num_bigint::BigInt;
use num_integer::Integer;
use num_traits::{FromPrimitive, One, ToPrimitive, Zero};
/// Streaming state for the pi-digit computation: digits are extracted from
/// the fraction maintained as (numer, accum, denom) while series terms are
/// folded in one at a time.
struct Context {
    numer: BigInt,
    accum: BigInt,
    denom: BigInt,
}

impl Context {
    fn new() -> Context {
        // Initial state: numer = denom = 1, accum = 0.
        Context {
            numer: One::one(),
            accum: Zero::zero(),
            denom: One::one(),
        }
    }

    /// Convenience conversion from `i32` to `BigInt`.
    fn from_i32(i: i32) -> BigInt {
        FromPrimitive::from_i32(i).unwrap()
    }

    /// Try to extract the next decimal digit from the current state.
    /// Returns -1 when the state does not yet determine the digit and
    /// more terms must be folded in first.
    fn extract_digit(&self) -> i32 {
        if self.numer > self.accum {
            return -1;
        }
        // Candidate digit q = floor((3*numer + accum) / denom), with
        // remainder r used to test whether the digit is settled.
        let (q, r) = (&self.numer * Context::from_i32(3) + &self.accum).div_rem(&self.denom);
        // NOTE(review): presumably this rejects states where using
        // 4*numer instead of 3*numer would yield a different digit.
        if r + &self.numer >= self.denom {
            return -1;
        }
        q.to_i32().unwrap()
    }

    /// Fold the k-th series term into the state.
    fn next_term(&mut self, k: i32) {
        let y2 = Context::from_i32(k * 2 + 1);
        self.accum = (&self.accum + (&self.numer << 1)) * &y2;
        self.numer = &self.numer * Context::from_i32(k);
        self.denom = &self.denom * y2;
    }

    /// Remove the already-emitted digit `d` and scale the state by 10 to
    /// expose the next digit.
    fn eliminate_digit(&mut self, d: i32) {
        let d = Context::from_i32(d);
        let ten = Context::from_i32(10);
        self.accum = (&self.accum - &self.denom * d) * &ten;
        self.numer = &self.numer * ten;
    }
}
/// Write the first `n` digits of pi to `out`, ten digits per line, each
/// full line suffixed with "\t:<count>"; a final partial line is padded
/// with spaces before its suffix.
fn pidigits(n: isize, out: &mut dyn io::Write) -> io::Result<()> {
    let mut k = 0;
    let mut context = Context::new();
    for i in 1..=n {
        // Fold in terms until the next digit is unambiguous.
        let mut d;
        loop {
            k += 1;
            context.next_term(k);
            d = context.extract_digit();
            if d != -1 {
                break;
            }
        }
        write!(out, "{}", d)?;
        if i % 10 == 0 {
            writeln!(out, "\t:{}", i)?;
        }
        context.eliminate_digit(d);
    }
    // Pad the trailing partial line (if any) so the ":n" column lines up.
    let m = n % 10;
    if m != 0 {
        for _ in m..10 {
            write!(out, " ")?;
        }
        writeln!(out, "\t:{}", n)?;
    }
    Ok(())
}
// Number of digits produced when no count is given on the command line.
const DEFAULT_DIGITS: isize = 512;

fn main() {
    // Usage: pidigits [N | --bench]. `--bench` writes DEFAULT_DIGITS digits
    // to a sink (discarding output); a numeric argument sets the digit count.
    let args = std::env::args().collect::<Vec<_>>();
    let n = if args.len() < 2 {
        DEFAULT_DIGITS
    } else if args[1] == "--bench" {
        return pidigits(DEFAULT_DIGITS, &mut std::io::sink()).unwrap();
    } else {
        FromStr::from_str(&args[1]).unwrap()
    };
    pidigits(n, &mut std::io::stdout()).unwrap();
}

1229
vendor/num-bigint/src/bigint.rs vendored Normal file

File diff suppressed because it is too large Load Diff

239
vendor/num-bigint/src/bigint/addition.rs vendored Normal file
View File

@@ -0,0 +1,239 @@
use super::CheckedUnsignedAbs::{Negative, Positive};
use super::Sign::{Minus, NoSign, Plus};
use super::{BigInt, UnsignedAbs};
use crate::{IsizePromotion, UsizePromotion};
use core::cmp::Ordering::{Equal, Greater, Less};
use core::iter::Sum;
use core::mem;
use core::ops::{Add, AddAssign};
use num_traits::CheckedAdd;
// We want to forward to BigUint::add, but it's not clear how that will go until
// we compare both sign and magnitude. So we duplicate this body for every
// val/ref combination, deferring that decision to BigUint's own forwarding.
//
// For each operand the caller passes three views: the expression itself
// (`$a`/`$b`, for reading `.sign`/`.data`), an owned version (`$a_owned`,
// returned unchanged when the other side is zero), and its magnitude in
// whichever owned/borrowed form the impl has available (`$a_data`).
macro_rules! bigint_add {
    ($a:expr, $a_owned:expr, $a_data:expr, $b:expr, $b_owned:expr, $b_data:expr) => {
        match ($a.sign, $b.sign) {
            (_, NoSign) => $a_owned,
            (NoSign, _) => $b_owned,
            // same sign => keep the sign with the sum of magnitudes
            (Plus, Plus) | (Minus, Minus) => BigInt::from_biguint($a.sign, $a_data + $b_data),
            // opposite signs => keep the sign of the larger with the difference of magnitudes
            (Plus, Minus) | (Minus, Plus) => match $a.data.cmp(&$b.data) {
                Less => BigInt::from_biguint($b.sign, $b_data - $a_data),
                Greater => BigInt::from_biguint($a.sign, $a_data - $b_data),
                Equal => BigInt::ZERO,
            },
        }
    };
}
// The four val/ref combinations all expand `bigint_add!`, differing only in
// whether each operand's magnitude is passed owned (reusing its allocation)
// or borrowed (cloned only on the path that needs it).

impl Add<&BigInt> for &BigInt {
    type Output = BigInt;

    #[inline]
    fn add(self, other: &BigInt) -> BigInt {
        // Both operands borrowed: clone lazily inside the macro arms.
        bigint_add!(
            self,
            self.clone(),
            &self.data,
            other,
            other.clone(),
            &other.data
        )
    }
}

impl Add<BigInt> for &BigInt {
    type Output = BigInt;

    #[inline]
    fn add(self, other: BigInt) -> BigInt {
        // `other` is owned, so its buffer can be consumed directly.
        bigint_add!(self, self.clone(), &self.data, other, other, other.data)
    }
}

impl Add<&BigInt> for BigInt {
    type Output = BigInt;

    #[inline]
    fn add(self, other: &BigInt) -> BigInt {
        // `self` is owned, so its buffer can be consumed directly.
        bigint_add!(self, self, self.data, other, other.clone(), &other.data)
    }
}

impl Add<BigInt> for BigInt {
    type Output = BigInt;

    #[inline]
    fn add(self, other: BigInt) -> BigInt {
        // Both owned: no clones at all.
        bigint_add!(self, self, self.data, other, other, other.data)
    }
}
impl AddAssign<&BigInt> for BigInt {
    #[inline]
    fn add_assign(&mut self, other: &BigInt) {
        // Move `self` out (leaving zero behind) so the owned `Add` impl can
        // reuse its allocation, then store the sum back.
        let n = mem::replace(self, Self::ZERO);
        *self = n + other;
    }
}
forward_val_assign!(impl AddAssign for BigInt, add_assign);

// Generate the scalar operator boilerplate: promote small primitive types,
// then forward all ref/val combinations to the owned-value impls below.
promote_all_scalars!(impl Add for BigInt, add);
promote_all_scalars_assign!(impl AddAssign for BigInt, add_assign);
forward_all_scalar_binop_to_val_val_commutative!(impl Add<u32> for BigInt, add);
forward_all_scalar_binop_to_val_val_commutative!(impl Add<u64> for BigInt, add);
forward_all_scalar_binop_to_val_val_commutative!(impl Add<u128> for BigInt, add);
// Adding an unsigned scalar: trivial when `self` is zero or positive; when
// `self` is negative, compare magnitudes to decide the result's sign.

impl Add<u32> for BigInt {
    type Output = BigInt;

    #[inline]
    fn add(self, other: u32) -> BigInt {
        match self.sign {
            NoSign => From::from(other),
            Plus => BigInt::from(self.data + other),
            Minus => match self.data.cmp(&From::from(other)) {
                Equal => Self::ZERO,
                Less => BigInt::from(other - self.data),
                Greater => -BigInt::from(self.data - other),
            },
        }
    }
}

impl AddAssign<u32> for BigInt {
    #[inline]
    fn add_assign(&mut self, other: u32) {
        // Take ownership of `self`'s value, add, and store back.
        let n = mem::replace(self, Self::ZERO);
        *self = n + other;
    }
}

impl Add<u64> for BigInt {
    type Output = BigInt;

    #[inline]
    fn add(self, other: u64) -> BigInt {
        match self.sign {
            NoSign => From::from(other),
            Plus => BigInt::from(self.data + other),
            Minus => match self.data.cmp(&From::from(other)) {
                Equal => Self::ZERO,
                Less => BigInt::from(other - self.data),
                Greater => -BigInt::from(self.data - other),
            },
        }
    }
}

impl AddAssign<u64> for BigInt {
    #[inline]
    fn add_assign(&mut self, other: u64) {
        let n = mem::replace(self, Self::ZERO);
        *self = n + other;
    }
}

impl Add<u128> for BigInt {
    type Output = BigInt;

    #[inline]
    fn add(self, other: u128) -> BigInt {
        match self.sign {
            NoSign => BigInt::from(other),
            Plus => BigInt::from(self.data + other),
            Minus => match self.data.cmp(&From::from(other)) {
                Equal => Self::ZERO,
                Less => BigInt::from(other - self.data),
                Greater => -BigInt::from(self.data - other),
            },
        }
    }
}

impl AddAssign<u128> for BigInt {
    #[inline]
    fn add_assign(&mut self, other: u128) {
        let n = mem::replace(self, Self::ZERO);
        *self = n + other;
    }
}
forward_all_scalar_binop_to_val_val_commutative!(impl Add<i32> for BigInt, add);
forward_all_scalar_binop_to_val_val_commutative!(impl Add<i64> for BigInt, add);
forward_all_scalar_binop_to_val_val_commutative!(impl Add<i128> for BigInt, add);

// Adding a signed scalar: split it into sign and unsigned magnitude with
// `checked_uabs` (which handles `MIN` safely), then reuse the unsigned
// add/sub impls above.

impl Add<i32> for BigInt {
    type Output = BigInt;

    #[inline]
    fn add(self, other: i32) -> BigInt {
        match other.checked_uabs() {
            Positive(u) => self + u,
            Negative(u) => self - u,
        }
    }
}

impl AddAssign<i32> for BigInt {
    #[inline]
    fn add_assign(&mut self, other: i32) {
        match other.checked_uabs() {
            Positive(u) => *self += u,
            Negative(u) => *self -= u,
        }
    }
}

impl Add<i64> for BigInt {
    type Output = BigInt;

    #[inline]
    fn add(self, other: i64) -> BigInt {
        match other.checked_uabs() {
            Positive(u) => self + u,
            Negative(u) => self - u,
        }
    }
}

impl AddAssign<i64> for BigInt {
    #[inline]
    fn add_assign(&mut self, other: i64) {
        match other.checked_uabs() {
            Positive(u) => *self += u,
            Negative(u) => *self -= u,
        }
    }
}

impl Add<i128> for BigInt {
    type Output = BigInt;

    #[inline]
    fn add(self, other: i128) -> BigInt {
        match other.checked_uabs() {
            Positive(u) => self + u,
            Negative(u) => self - u,
        }
    }
}

impl AddAssign<i128> for BigInt {
    #[inline]
    fn add_assign(&mut self, other: i128) {
        match other.checked_uabs() {
            Positive(u) => *self += u,
            Negative(u) => *self -= u,
        }
    }
}
impl CheckedAdd for BigInt {
    #[inline]
    fn checked_add(&self, v: &BigInt) -> Option<BigInt> {
        // Arbitrary-precision addition cannot overflow, so this always
        // succeeds; the trait is implemented for generic-code compatibility.
        Some(self.add(v))
    }
}

impl_sum_iter_type!(BigInt);

View File

@@ -0,0 +1,43 @@
#![cfg(any(feature = "quickcheck", feature = "arbitrary"))]
use super::{BigInt, Sign};
use crate::BigUint;
#[cfg(feature = "quickcheck")]
use alloc::boxed::Box;
#[cfg(feature = "quickcheck")]
#[cfg_attr(docsrs, doc(cfg(feature = "quickcheck")))]
impl quickcheck::Arbitrary for BigInt {
    fn arbitrary(g: &mut quickcheck::Gen) -> Self {
        // Pick a random sign and an independently random magnitude.
        let positive = bool::arbitrary(g);
        let sign = if positive { Sign::Plus } else { Sign::Minus };
        Self::from_biguint(sign, BigUint::arbitrary(g))
    }

    fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
        // Shrink the magnitude while preserving the original sign.
        let sign = self.sign();
        let unsigned_shrink = self.data.shrink();
        Box::new(unsigned_shrink.map(move |x| BigInt::from_biguint(sign, x)))
    }
}
#[cfg(feature = "arbitrary")]
#[cfg_attr(docsrs, doc(cfg(feature = "arbitrary")))]
impl arbitrary::Arbitrary<'_> for BigInt {
    fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result<Self> {
        // One byte decides the sign; the rest feeds the magnitude.
        let positive = bool::arbitrary(u)?;
        let sign = if positive { Sign::Plus } else { Sign::Minus };
        Ok(Self::from_biguint(sign, BigUint::arbitrary(u)?))
    }

    fn arbitrary_take_rest(mut u: arbitrary::Unstructured<'_>) -> arbitrary::Result<Self> {
        // Same as `arbitrary`, but the magnitude consumes all remaining input.
        let positive = bool::arbitrary(&mut u)?;
        let sign = if positive { Sign::Plus } else { Sign::Minus };
        Ok(Self::from_biguint(sign, BigUint::arbitrary_take_rest(u)?))
    }

    fn size_hint(depth: usize) -> (usize, Option<usize>) {
        arbitrary::size_hint::and(bool::size_hint(depth), BigUint::size_hint(depth))
    }
}

531
vendor/num-bigint/src/bigint/bits.rs vendored Normal file
View File

@@ -0,0 +1,531 @@
use super::BigInt;
use super::Sign::{Minus, NoSign, Plus};
use crate::big_digit::{self, BigDigit, DoubleBigDigit};
use crate::biguint::IntDigits;
use alloc::vec::Vec;
use core::cmp::Ordering::{Equal, Greater, Less};
use core::ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign};
use num_traits::{ToPrimitive, Zero};
// Negation in two's complement.
// acc must be initialized as 1 for least-significant digit.
//
// When negating, a carry (acc == 1) means that all the digits
// considered to this point were zero. This means that if all the
// digits of a negative BigInt have been considered, carry must be
// zero as we cannot have negative zero.
//
// 01 -> ...f ff
// ff -> ...f 01
// 01 00 -> ...f ff 00
// 01 01 -> ...f fe ff
// 01 ff -> ...f fe 01
// ff 00 -> ...f 01 00
// ff 01 -> ...f 00 ff
// ff ff -> ...f 00 01
//
// Each call computes one digit of `-a` as `!a + carry`, returning the low
// digit and leaving the new carry in `acc`.
#[inline]
fn negate_carry(a: BigDigit, acc: &mut DoubleBigDigit) -> BigDigit {
    *acc += DoubleBigDigit::from(!a);
    let lo = *acc as BigDigit;
    *acc >>= big_digit::BITS;
    lo
}
// + 1 & -ff = ...0 01 & ...f 01 = ...0 01 = + 1
// +ff & - 1 = ...0 ff & ...f ff = ...0 ff = +ff
// answer is pos, has length of a
//
// `a` holds the positive magnitude (modified in place); `b` is the negative
// operand's magnitude, negated digit-by-digit into two's complement on the fly.
fn bitand_pos_neg(a: &mut [BigDigit], b: &[BigDigit]) {
    let mut carry_b = 1;
    for (ai, &bi) in a.iter_mut().zip(b.iter()) {
        let twos_b = negate_carry(bi, &mut carry_b);
        *ai &= twos_b;
    }
    // Any leftover carry is only legal if b still has unconsumed digits.
    debug_assert!(b.len() > a.len() || carry_b == 0);
}
// - 1 & +ff = ...f ff & ...0 ff = ...0 ff = +ff
// -ff & + 1 = ...f 01 & ...0 01 = ...0 01 = + 1
// answer is pos, has length of b
//
// `a` holds the negative operand's magnitude and is overwritten with the
// (positive) result, which must end up exactly `b.len()` digits long.
fn bitand_neg_pos(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
    let mut carry_a = 1;
    for (ai, &bi) in a.iter_mut().zip(b.iter()) {
        let twos_a = negate_carry(*ai, &mut carry_a);
        *ai = twos_a & bi;
    }
    debug_assert!(a.len() > b.len() || carry_a == 0);
    // Resize the result to b's length: extra high digits of -a AND with the
    // implicit zero-extension of b (drop them); missing digits AND b's high
    // digits with the implicit ...f prefix of -a (copy them).
    match Ord::cmp(&a.len(), &b.len()) {
        Greater => a.truncate(b.len()),
        Equal => {}
        Less => {
            let extra = &b[a.len()..];
            a.extend(extra.iter().cloned());
        }
    }
}
// - 1 & -ff = ...f ff & ...f 01 = ...f 01 = - ff
// -ff & - 1 = ...f 01 & ...f ff = ...f 01 = - ff
// -ff & -fe = ...f 01 & ...f 02 = ...f 00 = -100
// answer is neg, has length of longest with a possible carry
//
// Both operands are negated into two's complement, ANDed, and the result is
// negated back into a magnitude (`carry_and`), all in a single pass.
fn bitand_neg_neg(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
    let mut carry_a = 1;
    let mut carry_b = 1;
    let mut carry_and = 1;
    for (ai, &bi) in a.iter_mut().zip(b.iter()) {
        let twos_a = negate_carry(*ai, &mut carry_a);
        let twos_b = negate_carry(bi, &mut carry_b);
        *ai = negate_carry(twos_a & twos_b, &mut carry_and);
    }
    debug_assert!(a.len() > b.len() || carry_a == 0);
    debug_assert!(b.len() > a.len() || carry_b == 0);
    // Process the longer operand's tail against the shorter one's implicit
    // ...f (all-ones) prefix, which leaves the tail digit unchanged by AND.
    match Ord::cmp(&a.len(), &b.len()) {
        Greater => {
            for ai in a[b.len()..].iter_mut() {
                let twos_a = negate_carry(*ai, &mut carry_a);
                *ai = negate_carry(twos_a, &mut carry_and);
            }
            debug_assert!(carry_a == 0);
        }
        Equal => {}
        Less => {
            let extra = &b[a.len()..];
            a.extend(extra.iter().map(|&bi| {
                let twos_b = negate_carry(bi, &mut carry_b);
                negate_carry(twos_b, &mut carry_and)
            }));
            debug_assert!(carry_b == 0);
        }
    }
    // A remaining negation carry means the magnitude grew by one digit
    // (e.g. -ff & -fe = -100 above).
    if carry_and != 0 {
        a.push(1);
    }
}
forward_val_val_binop!(impl BitAnd for BigInt, bitand);
forward_ref_val_binop!(impl BitAnd for BigInt, bitand);
// do not use forward_ref_ref_binop_commutative! for bitand so that we can
// clone as needed, avoiding over-allocation
impl BitAnd<&BigInt> for &BigInt {
    type Output = BigInt;
    #[inline]
    fn bitand(self, other: &BigInt) -> BigInt {
        match (self.sign, other.sign) {
            // Anything AND zero is zero.
            (NoSign, _) | (_, NoSign) => BigInt::ZERO,
            (Plus, Plus) => BigInt::from(&self.data & &other.data),
            (Plus, Minus) => self.clone() & other,
            (Minus, Plus) => other.clone() & self,
            (Minus, Minus) => {
                // Forward to the val-ref impl, cloning whichever side is
                // larger since the result may need that many digits.
                if other.len() > self.len() {
                    other.clone() & self
                } else {
                    self.clone() & other
                }
            }
        }
    }
}
impl BitAnd<&BigInt> for BigInt {
    type Output = BigInt;
    #[inline]
    fn bitand(mut self, other: &BigInt) -> BigInt {
        // Reuse the in-place implementation on the owned operand.
        self &= other;
        self
    }
}
forward_val_assign!(impl BitAndAssign for BigInt, bitand_assign);
impl BitAndAssign<&BigInt> for BigInt {
    /// In-place AND, dispatching on the sign pair to the magnitude helpers.
    fn bitand_assign(&mut self, other: &BigInt) {
        match (self.sign, other.sign) {
            // 0 & x == x & 0 == 0.
            (NoSign, _) => {}
            (_, NoSign) => self.set_zero(),
            (Plus, Plus) => {
                self.data &= &other.data;
                // A zero magnitude must be normalized to NoSign.
                if self.data.is_zero() {
                    self.sign = NoSign;
                }
            }
            (Plus, Minus) => {
                bitand_pos_neg(self.digits_mut(), other.digits());
                self.normalize();
            }
            (Minus, Plus) => {
                // neg & pos is non-negative.
                bitand_neg_pos(self.digits_mut(), other.digits());
                self.sign = Plus;
                self.normalize();
            }
            (Minus, Minus) => {
                // neg & neg stays negative.
                bitand_neg_neg(self.digits_mut(), other.digits());
                self.normalize();
            }
        }
    }
}
// + 1 | -ff = ...0 01 | ...f 01 = ...f 01 = -ff
// +ff | - 1 = ...0 ff | ...f ff = ...f ff = - 1
// answer is neg, has length of b
//
// The negative operand dominates OR: `a` is combined with `b`'s two's
// complement and the (negative) result re-negated back to a magnitude.
fn bitor_pos_neg(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
    let mut carry_b = 1;
    let mut carry_or = 1;
    for (ai, &bi) in a.iter_mut().zip(b.iter()) {
        let twos_b = negate_carry(bi, &mut carry_b);
        *ai = negate_carry(*ai | twos_b, &mut carry_or);
    }
    // A fully consumed negative operand cannot still be carrying.
    debug_assert!(b.len() > a.len() || carry_b == 0);
    match Ord::cmp(&a.len(), &b.len()) {
        Greater => {
            // `a`'s surplus digits OR with `b`'s sign extension (all
            // ones), giving all ones — i.e. zero after re-negation.
            a.truncate(b.len());
        }
        Equal => {}
        Less => {
            // `a`'s missing digits are zero; append `b`'s surplus after
            // the two negation passes.
            let extra = &b[a.len()..];
            a.extend(extra.iter().map(|&bi| {
                let twos_b = negate_carry(bi, &mut carry_b);
                negate_carry(twos_b, &mut carry_or)
            }));
            debug_assert!(carry_b == 0);
        }
    }
    // for carry_or to be non-zero, we would need twos_b == 0
    debug_assert!(carry_or == 0);
}
// - 1 | +ff = ...f ff | ...0 ff = ...f ff = - 1
// -ff | + 1 = ...f 01 | ...0 01 = ...f 01 = -ff
// answer is neg, has length of a
//
// Mirror of `bitor_pos_neg`: here `a` is the negative operand, so the
// result keeps `a`'s length and any surplus digits of `b` (zero in
// two's complement beyond `b`) are irrelevant.
fn bitor_neg_pos(a: &mut [BigDigit], b: &[BigDigit]) {
    let mut carry_a = 1;
    let mut carry_or = 1;
    for (ai, &bi) in a.iter_mut().zip(b.iter()) {
        let twos_a = negate_carry(*ai, &mut carry_a);
        *ai = negate_carry(twos_a | bi, &mut carry_or);
    }
    // A fully consumed negative operand cannot still be carrying.
    debug_assert!(a.len() > b.len() || carry_a == 0);
    if a.len() > b.len() {
        // `b`'s high digits are zero: `a`'s surplus digits only pass
        // through the two negation stages.
        for ai in a[b.len()..].iter_mut() {
            let twos_a = negate_carry(*ai, &mut carry_a);
            *ai = negate_carry(twos_a, &mut carry_or);
        }
        debug_assert!(carry_a == 0);
    }
    // for carry_or to be non-zero, we would need twos_a == 0
    debug_assert!(carry_or == 0);
}
// - 1 | -ff = ...f ff | ...f 01 = ...f ff = -1
// -ff | - 1 = ...f 01 | ...f ff = ...f ff = -1
// answer is neg, has length of shortest
//
// OR of two negative values: both are negated on the fly, ORed, and
// the result re-negated. Beyond the shorter operand the sign extension
// (all ones) makes every further digit all ones, i.e. zero after the
// final negation — hence the truncation.
fn bitor_neg_neg(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
    let mut carry_a = 1;
    let mut carry_b = 1;
    let mut carry_or = 1;
    for (ai, &bi) in a.iter_mut().zip(b.iter()) {
        let twos_a = negate_carry(*ai, &mut carry_a);
        let twos_b = negate_carry(bi, &mut carry_b);
        *ai = negate_carry(twos_a | twos_b, &mut carry_or);
    }
    // A fully consumed negative operand cannot still be carrying.
    debug_assert!(a.len() > b.len() || carry_a == 0);
    debug_assert!(b.len() > a.len() || carry_b == 0);
    if a.len() > b.len() {
        a.truncate(b.len());
    }
    // for carry_or to be non-zero, we would need twos_a == 0 or twos_b == 0
    debug_assert!(carry_or == 0);
}
forward_val_val_binop!(impl BitOr for BigInt, bitor);
forward_ref_val_binop!(impl BitOr for BigInt, bitor);
// do not use forward_ref_ref_binop_commutative! for bitor so that we can
// clone as needed, avoiding over-allocation
impl BitOr<&BigInt> for &BigInt {
    type Output = BigInt;
    #[inline]
    fn bitor(self, other: &BigInt) -> BigInt {
        match (self.sign, other.sign) {
            // Zero is the identity for OR.
            (NoSign, _) => other.clone(),
            (_, NoSign) => self.clone(),
            (Plus, Plus) => BigInt::from(&self.data | &other.data),
            (Plus, Minus) => other.clone() | self,
            (Minus, Plus) => self.clone() | other,
            (Minus, Minus) => {
                // Forward to val-ref, cloning whichever side is smaller:
                // the result of neg | neg is at most that short.
                if other.len() < self.len() {
                    other.clone() | self
                } else {
                    self.clone() | other
                }
            }
        }
    }
}
impl BitOr<&BigInt> for BigInt {
    type Output = BigInt;
    #[inline]
    fn bitor(mut self, other: &BigInt) -> BigInt {
        // Reuse the in-place implementation on the owned operand.
        self |= other;
        self
    }
}
forward_val_assign!(impl BitOrAssign for BigInt, bitor_assign);
impl BitOrAssign<&BigInt> for BigInt {
    /// In-place OR, dispatching on the sign pair to the magnitude helpers.
    fn bitor_assign(&mut self, other: &BigInt) {
        match (self.sign, other.sign) {
            // x | 0 == x; 0 | x == x.
            (_, NoSign) => {}
            (NoSign, _) => self.clone_from(other),
            (Plus, Plus) => self.data |= &other.data,
            (Plus, Minus) => {
                // pos | neg is negative.
                bitor_pos_neg(self.digits_mut(), other.digits());
                self.sign = Minus;
                self.normalize();
            }
            (Minus, Plus) => {
                bitor_neg_pos(self.digits_mut(), other.digits());
                self.normalize();
            }
            (Minus, Minus) => {
                bitor_neg_neg(self.digits_mut(), other.digits());
                self.normalize();
            }
        }
    }
}
// + 1 ^ -ff = ...0 01 ^ ...f 01 = ...f 00 = -100
// +ff ^ - 1 = ...0 ff ^ ...f ff = ...f 00 = -100
// answer is neg, has length of longest with a possible carry
//
// `a` is XORed with `b`'s two's complement and the (negative) result
// re-negated back to a magnitude.
fn bitxor_pos_neg(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
    let mut carry_b = 1;
    let mut carry_xor = 1;
    for (ai, &bi) in a.iter_mut().zip(b.iter()) {
        let twos_b = negate_carry(bi, &mut carry_b);
        *ai = negate_carry(*ai ^ twos_b, &mut carry_xor);
    }
    // A fully consumed negative operand cannot still be carrying.
    debug_assert!(b.len() > a.len() || carry_b == 0);
    match Ord::cmp(&a.len(), &b.len()) {
        Greater => {
            // Beyond `b`, its sign extension is all ones.
            for ai in a[b.len()..].iter_mut() {
                let twos_b = !0;
                *ai = negate_carry(*ai ^ twos_b, &mut carry_xor);
            }
        }
        Equal => {}
        Less => {
            // Beyond `a`, its digits are zero: XOR passes `b` through.
            let extra = &b[a.len()..];
            a.extend(extra.iter().map(|&bi| {
                let twos_b = negate_carry(bi, &mut carry_b);
                negate_carry(twos_b, &mut carry_xor)
            }));
            debug_assert!(carry_b == 0);
        }
    }
    if carry_xor != 0 {
        // The magnitude grew by one digit (e.g. +1 ^ -ff = -100).
        a.push(1);
    }
}
// - 1 ^ +ff = ...f ff ^ ...0 ff = ...f 00 = -100
// -ff ^ + 1 = ...f 01 ^ ...0 01 = ...f 00 = -100
// answer is neg, has length of longest with a possible carry
//
// Mirror of `bitxor_pos_neg` with the roles of the operands swapped.
fn bitxor_neg_pos(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
    let mut carry_a = 1;
    let mut carry_xor = 1;
    for (ai, &bi) in a.iter_mut().zip(b.iter()) {
        let twos_a = negate_carry(*ai, &mut carry_a);
        *ai = negate_carry(twos_a ^ bi, &mut carry_xor);
    }
    // A fully consumed negative operand cannot still be carrying.
    debug_assert!(a.len() > b.len() || carry_a == 0);
    match Ord::cmp(&a.len(), &b.len()) {
        Greater => {
            // Beyond `b`, its digits are zero: XOR passes `a` through.
            for ai in a[b.len()..].iter_mut() {
                let twos_a = negate_carry(*ai, &mut carry_a);
                *ai = negate_carry(twos_a, &mut carry_xor);
            }
            debug_assert!(carry_a == 0);
        }
        Equal => {}
        Less => {
            // Beyond `a`, its sign extension is all ones.
            let extra = &b[a.len()..];
            a.extend(extra.iter().map(|&bi| {
                let twos_a = !0;
                negate_carry(twos_a ^ bi, &mut carry_xor)
            }));
        }
    }
    if carry_xor != 0 {
        // The magnitude grew by one digit.
        a.push(1);
    }
}
// - 1 ^ -ff = ...f ff ^ ...f 01 = ...0 fe = +fe
// -ff ^ - 1 = ...f 01 ^ ...f ff = ...0 fe = +fe
// answer is pos, has length of longest
//
// XOR of two negatives is non-negative, so no final re-negation pass
// is needed — only the two on-the-fly input negations.
fn bitxor_neg_neg(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
    let mut carry_a = 1;
    let mut carry_b = 1;
    for (ai, &bi) in a.iter_mut().zip(b.iter()) {
        let twos_a = negate_carry(*ai, &mut carry_a);
        let twos_b = negate_carry(bi, &mut carry_b);
        *ai = twos_a ^ twos_b;
    }
    // A fully consumed negative operand cannot still be carrying.
    debug_assert!(a.len() > b.len() || carry_a == 0);
    debug_assert!(b.len() > a.len() || carry_b == 0);
    match Ord::cmp(&a.len(), &b.len()) {
        Greater => {
            // Beyond `b`, its sign extension is all ones.
            for ai in a[b.len()..].iter_mut() {
                let twos_a = negate_carry(*ai, &mut carry_a);
                let twos_b = !0;
                *ai = twos_a ^ twos_b;
            }
            debug_assert!(carry_a == 0);
        }
        Equal => {}
        Less => {
            // Beyond `a`, its sign extension is all ones.
            let extra = &b[a.len()..];
            a.extend(extra.iter().map(|&bi| {
                let twos_a = !0;
                let twos_b = negate_carry(bi, &mut carry_b);
                twos_a ^ twos_b
            }));
            debug_assert!(carry_b == 0);
        }
    }
}
forward_all_binop_to_val_ref_commutative!(impl BitXor for BigInt, bitxor);
impl BitXor<&BigInt> for BigInt {
    type Output = BigInt;
    #[inline]
    fn bitxor(mut self, other: &BigInt) -> BigInt {
        // Reuse the in-place implementation on the owned operand.
        self ^= other;
        self
    }
}
forward_val_assign!(impl BitXorAssign for BigInt, bitxor_assign);
impl BitXorAssign<&BigInt> for BigInt {
    /// In-place XOR, dispatching on the sign pair to the magnitude helpers.
    fn bitxor_assign(&mut self, other: &BigInt) {
        match (self.sign, other.sign) {
            // x ^ 0 == x; 0 ^ x == x.
            (_, NoSign) => {}
            (NoSign, _) => self.clone_from(other),
            (Plus, Plus) => {
                self.data ^= &other.data;
                // A zero magnitude must be normalized to NoSign.
                if self.data.is_zero() {
                    self.sign = NoSign;
                }
            }
            (Plus, Minus) => {
                // pos ^ neg is negative.
                bitxor_pos_neg(self.digits_mut(), other.digits());
                self.sign = Minus;
                self.normalize();
            }
            (Minus, Plus) => {
                bitxor_neg_pos(self.digits_mut(), other.digits());
                self.normalize();
            }
            (Minus, Minus) => {
                // neg ^ neg is non-negative.
                bitxor_neg_neg(self.digits_mut(), other.digits());
                self.sign = Plus;
                self.normalize();
            }
        }
    }
}
/// Sets or clears bit `bit` of the negative `BigInt` `x`, interpreting
/// the number as if it were stored in two's complement while actually
/// operating on the sign-magnitude representation.
pub(super) fn set_negative_bit(x: &mut BigInt, bit: u64, value: bool) {
    debug_assert_eq!(x.sign, Minus);
    let data = &mut x.data;
    let bits_per_digit = u64::from(big_digit::BITS);
    if bit >= bits_per_digit * data.len() as u64 {
        // Above the stored magnitude the two's complement of a negative
        // number is all ones, so the bit is already set; clearing it
        // means setting the corresponding bit of the magnitude.
        if !value {
            data.set_bit(bit, true);
        }
    } else {
        // If the Uint number is
        //   ... 0  x  1 0 ... 0
        // then the two's complement is
        //   ... 1 !x  1 0 ... 0
        //            |-- bit at position 'trailing_zeros'
        // where !x is obtained from x by flipping each bit
        let trailing_zeros = data.trailing_zeros().unwrap();
        if bit > trailing_zeros {
            // Above the lowest set bit, two's complement == bitwise NOT
            // of the magnitude, so set/clear is simply inverted.
            data.set_bit(bit, !value);
        } else if bit == trailing_zeros && !value {
            // Clearing the bit at position `trailing_zeros` is dealt with by doing
            // similarly to what `bitand_neg_pos` does, except we start at digit
            // `bit_index`. All digits below `bit_index` are guaranteed to be zero,
            // so initially we have `carry_in` = `carry_out` = 1. Furthermore, we
            // stop traversing the digits when there are no more carries.
            let bit_index = (bit / bits_per_digit).to_usize().unwrap();
            let bit_mask = (1 as BigDigit) << (bit % bits_per_digit);
            let mut digit_iter = data.digits_mut().iter_mut().skip(bit_index);
            let mut carry_in = 1;
            let mut carry_out = 1;
            // First digit: negate, clear the target bit, negate back.
            let digit = digit_iter.next().unwrap();
            let twos_in = negate_carry(*digit, &mut carry_in);
            let twos_out = twos_in & !bit_mask;
            *digit = negate_carry(twos_out, &mut carry_out);
            for digit in digit_iter {
                if carry_in == 0 && carry_out == 0 {
                    // Exit the loop since no more digits can change
                    break;
                }
                let twos = negate_carry(*digit, &mut carry_in);
                *digit = negate_carry(twos, &mut carry_out);
            }
            if carry_out != 0 {
                // All digits have been traversed and there is a carry
                debug_assert_eq!(carry_in, 0);
                data.digits_mut().push(1);
            }
        } else if bit < trailing_zeros && value {
            // Flip each bit from position 'bit' to 'trailing_zeros', both inclusive
            //       ... 1 !x 1 0 ... 0 ... 0
            //                          |-- bit at position 'bit'
            //                |-- bit at position 'trailing_zeros'
            // bit_mask:      1 1 ... 1 0 .. 0
            // This is done by xor'ing with the bit_mask
            let index_lo = (bit / bits_per_digit).to_usize().unwrap();
            let index_hi = (trailing_zeros / bits_per_digit).to_usize().unwrap();
            let bit_mask_lo = big_digit::MAX << (bit % bits_per_digit);
            let bit_mask_hi =
                big_digit::MAX >> (bits_per_digit - 1 - (trailing_zeros % bits_per_digit));
            let digits = data.digits_mut();
            if index_lo == index_hi {
                digits[index_lo] ^= bit_mask_lo & bit_mask_hi;
            } else {
                // Spanning several digits: the lowest digit keeps only the
                // flipped range, the middle digits become all ones, and the
                // highest digit is flipped up to `trailing_zeros`.
                digits[index_lo] = bit_mask_lo;
                for digit in &mut digits[index_lo + 1..index_hi] {
                    *digit = big_digit::MAX;
                }
                digits[index_hi] ^= bit_mask_hi;
            }
        } else {
            // We end up here in two cases:
            //   bit == trailing_zeros && value: Bit is already set
            //   bit < trailing_zeros && !value: Bit is already cleared
        }
    }
}

472
vendor/num-bigint/src/bigint/convert.rs vendored Normal file
View File

@@ -0,0 +1,472 @@
use super::Sign::{self, Minus, NoSign, Plus};
use super::{BigInt, ToBigInt};
use crate::TryFromBigIntError;
use crate::{BigUint, ParseBigIntError, ToBigUint};
use alloc::vec::Vec;
use core::cmp::Ordering::{Equal, Greater, Less};
use core::convert::TryFrom;
use core::str::{self, FromStr};
use num_traits::{FromPrimitive, Num, One, ToPrimitive, Zero};
impl FromStr for BigInt {
    type Err = ParseBigIntError;
    /// Parses a decimal string, delegating to `from_str_radix` with base 10.
    #[inline]
    fn from_str(s: &str) -> Result<BigInt, ParseBigIntError> {
        BigInt::from_str_radix(s, 10)
    }
}
impl Num for BigInt {
    type FromStrRadixErr = ParseBigIntError;
    /// Creates and initializes a [`BigInt`].
    #[inline]
    fn from_str_radix(mut s: &str, radix: u32) -> Result<BigInt, ParseBigIntError> {
        let sign = if let Some(tail) = s.strip_prefix('-') {
            // Only consume the '-' when it is not followed by '+': for
            // input like "-+1" we deliberately leave `s` intact so the
            // magnitude parse below rejects it instead of accepting "-+".
            if !tail.starts_with('+') {
                s = tail
            }
            Minus
        } else {
            Plus
        };
        // Parse the magnitude (BigUint handles an optional leading '+').
        let bu = BigUint::from_str_radix(s, radix)?;
        Ok(BigInt::from_biguint(sign, bu))
    }
}
impl ToPrimitive for BigInt {
    #[inline]
    fn to_i64(&self) -> Option<i64> {
        match self.sign {
            Plus => self.data.to_i64(),
            NoSign => Some(0),
            Minus => {
                // A negative value's magnitude may be one larger than
                // i64::MAX: exactly 2^63 maps to i64::MIN, anything
                // bigger does not fit.
                let n = self.data.to_u64()?;
                let m: u64 = 1 << 63;
                match n.cmp(&m) {
                    Less => Some(-(n as i64)),
                    Equal => Some(i64::MIN),
                    Greater => None,
                }
            }
        }
    }
    #[inline]
    fn to_i128(&self) -> Option<i128> {
        match self.sign {
            Plus => self.data.to_i128(),
            NoSign => Some(0),
            Minus => {
                // Same i128::MIN edge case as in `to_i64`.
                let n = self.data.to_u128()?;
                let m: u128 = 1 << 127;
                match n.cmp(&m) {
                    Less => Some(-(n as i128)),
                    Equal => Some(i128::MIN),
                    Greater => None,
                }
            }
        }
    }
    #[inline]
    fn to_u64(&self) -> Option<u64> {
        // Negative values have no unsigned representation.
        match self.sign {
            Plus => self.data.to_u64(),
            NoSign => Some(0),
            Minus => None,
        }
    }
    #[inline]
    fn to_u128(&self) -> Option<u128> {
        match self.sign {
            Plus => self.data.to_u128(),
            NoSign => Some(0),
            Minus => None,
        }
    }
    #[inline]
    fn to_f32(&self) -> Option<f32> {
        // Convert the magnitude, then apply the sign.
        let n = self.data.to_f32()?;
        Some(if self.sign == Minus { -n } else { n })
    }
    #[inline]
    fn to_f64(&self) -> Option<f64> {
        let n = self.data.to_f64()?;
        Some(if self.sign == Minus { -n } else { n })
    }
}
// Generates `TryFrom<&BigInt>` and `TryFrom<BigInt>` for a primitive
// type, backed by the corresponding `ToPrimitive` method. The by-value
// form returns the original `BigInt` in the error so it is not lost.
macro_rules! impl_try_from_bigint {
    ($T:ty, $to_ty:path) => {
        impl TryFrom<&BigInt> for $T {
            type Error = TryFromBigIntError<()>;
            #[inline]
            fn try_from(value: &BigInt) -> Result<$T, TryFromBigIntError<()>> {
                $to_ty(value).ok_or(TryFromBigIntError::new(()))
            }
        }
        impl TryFrom<BigInt> for $T {
            type Error = TryFromBigIntError<BigInt>;
            #[inline]
            fn try_from(value: BigInt) -> Result<$T, TryFromBigIntError<BigInt>> {
                <$T>::try_from(&value).map_err(|_| TryFromBigIntError::new(value))
            }
        }
    };
}
impl_try_from_bigint!(u8, ToPrimitive::to_u8);
impl_try_from_bigint!(u16, ToPrimitive::to_u16);
impl_try_from_bigint!(u32, ToPrimitive::to_u32);
impl_try_from_bigint!(u64, ToPrimitive::to_u64);
impl_try_from_bigint!(usize, ToPrimitive::to_usize);
impl_try_from_bigint!(u128, ToPrimitive::to_u128);
impl_try_from_bigint!(i8, ToPrimitive::to_i8);
impl_try_from_bigint!(i16, ToPrimitive::to_i16);
impl_try_from_bigint!(i32, ToPrimitive::to_i32);
impl_try_from_bigint!(i64, ToPrimitive::to_i64);
impl_try_from_bigint!(isize, ToPrimitive::to_isize);
impl_try_from_bigint!(i128, ToPrimitive::to_i128);
impl FromPrimitive for BigInt {
    /// Conversion from primitive integers always succeeds.
    #[inline]
    fn from_i64(n: i64) -> Option<BigInt> {
        Some(n.into())
    }
    #[inline]
    fn from_i128(n: i128) -> Option<BigInt> {
        Some(n.into())
    }
    #[inline]
    fn from_u64(n: u64) -> Option<BigInt> {
        Some(n.into())
    }
    #[inline]
    fn from_u128(n: u128) -> Option<BigInt> {
        Some(n.into())
    }
    #[inline]
    fn from_f64(n: f64) -> Option<BigInt> {
        if n >= 0.0 {
            BigUint::from_f64(n).map(BigInt::from)
        } else {
            // Convert the magnitude, then negate.
            BigUint::from_f64(-n).map(|x| -BigInt::from(x))
        }
    }
}
impl From<i64> for BigInt {
    #[inline]
    fn from(n: i64) -> Self {
        if n >= 0 {
            BigInt::from(n as u64)
        } else {
            // Magnitude of a negative value: two's-complement negate the
            // raw bits (n != 0 here, so this is exactly |n|).
            BigInt {
                sign: Minus,
                data: BigUint::from((n as u64).wrapping_neg()),
            }
        }
    }
}
impl From<i128> for BigInt {
    #[inline]
    fn from(n: i128) -> Self {
        if n >= 0 {
            BigInt::from(n as u128)
        } else {
            // Magnitude of a negative value: two's-complement negate the
            // raw bits (n != 0 here, so this is exactly |n|).
            BigInt {
                sign: Minus,
                data: BigUint::from((n as u128).wrapping_neg()),
            }
        }
    }
}
// Smaller signed primitives widen to i64 first and reuse `From<i64>`.
macro_rules! impl_bigint_from_int {
    ($T:ty) => {
        impl From<$T> for BigInt {
            #[inline]
            fn from(n: $T) -> Self {
                BigInt::from(n as i64)
            }
        }
    };
}
impl_bigint_from_int!(i8);
impl_bigint_from_int!(i16);
impl_bigint_from_int!(i32);
impl_bigint_from_int!(isize);
impl From<u64> for BigInt {
    #[inline]
    fn from(n: u64) -> Self {
        // Zero must use the canonical NoSign representation.
        if n == 0 {
            Self::ZERO
        } else {
            BigInt {
                sign: Plus,
                data: BigUint::from(n),
            }
        }
    }
}
impl From<u128> for BigInt {
    #[inline]
    fn from(n: u128) -> Self {
        // Zero must use the canonical NoSign representation.
        if n == 0 {
            Self::ZERO
        } else {
            BigInt {
                sign: Plus,
                data: BigUint::from(n),
            }
        }
    }
}
// Smaller unsigned primitives widen to u64 first and reuse `From<u64>`.
macro_rules! impl_bigint_from_uint {
    ($T:ty) => {
        impl From<$T> for BigInt {
            #[inline]
            fn from(n: $T) -> Self {
                BigInt::from(n as u64)
            }
        }
    };
}
impl_bigint_from_uint!(u8);
impl_bigint_from_uint!(u16);
impl_bigint_from_uint!(u32);
impl_bigint_from_uint!(usize);
impl From<BigUint> for BigInt {
    #[inline]
    fn from(n: BigUint) -> Self {
        // A non-zero magnitude becomes positive; zero keeps NoSign.
        if !n.is_zero() {
            BigInt {
                sign: Plus,
                data: n,
            }
        } else {
            Self::ZERO
        }
    }
}
impl ToBigInt for BigInt {
    /// Identity conversion; always succeeds with a clone.
    #[inline]
    fn to_bigint(&self) -> Option<BigInt> {
        Some(self.clone())
    }
}
impl ToBigInt for BigUint {
    /// Always succeeds: an unsigned value is zero or positive.
    #[inline]
    fn to_bigint(&self) -> Option<BigInt> {
        Some(if self.is_zero() {
            BigInt::ZERO
        } else {
            BigInt {
                sign: Plus,
                data: self.clone(),
            }
        })
    }
}
impl ToBigUint for BigInt {
    /// Only non-negative values have an unsigned form.
    #[inline]
    fn to_biguint(&self) -> Option<BigUint> {
        match self.sign() {
            Plus => Some(self.data.clone()),
            NoSign => Some(BigUint::ZERO),
            Minus => None,
        }
    }
}
impl TryFrom<&BigInt> for BigUint {
    type Error = TryFromBigIntError<()>;
    /// Fails (with a unit payload) when the value is negative.
    #[inline]
    fn try_from(value: &BigInt) -> Result<BigUint, TryFromBigIntError<()>> {
        value
            .to_biguint()
            .ok_or_else(|| TryFromBigIntError::new(()))
    }
}
impl TryFrom<BigInt> for BigUint {
type Error = TryFromBigIntError<BigInt>;
#[inline]
fn try_from(value: BigInt) -> Result<BigUint, TryFromBigIntError<BigInt>> {
if value.sign() == Sign::Minus {
Err(TryFromBigIntError::new(value))
} else {
Ok(value.data)
}
}
}
// Generates `ToBigInt` for a primitive type, backed by the matching
// `FromPrimitive` constructor (fallible only for floats).
macro_rules! impl_to_bigint {
    ($T:ty, $from_ty:path) => {
        impl ToBigInt for $T {
            #[inline]
            fn to_bigint(&self) -> Option<BigInt> {
                $from_ty(*self)
            }
        }
    };
}
impl_to_bigint!(isize, FromPrimitive::from_isize);
impl_to_bigint!(i8, FromPrimitive::from_i8);
impl_to_bigint!(i16, FromPrimitive::from_i16);
impl_to_bigint!(i32, FromPrimitive::from_i32);
impl_to_bigint!(i64, FromPrimitive::from_i64);
impl_to_bigint!(i128, FromPrimitive::from_i128);
impl_to_bigint!(usize, FromPrimitive::from_usize);
impl_to_bigint!(u8, FromPrimitive::from_u8);
impl_to_bigint!(u16, FromPrimitive::from_u16);
impl_to_bigint!(u32, FromPrimitive::from_u32);
impl_to_bigint!(u64, FromPrimitive::from_u64);
impl_to_bigint!(u128, FromPrimitive::from_u128);
impl_to_bigint!(f32, FromPrimitive::from_f32);
impl_to_bigint!(f64, FromPrimitive::from_f64);
impl From<bool> for BigInt {
    /// `true` maps to one, `false` to zero.
    fn from(x: bool) -> Self {
        match x {
            true => One::one(),
            false => Self::ZERO,
        }
    }
}
/// Decodes a big-endian two's-complement byte string into a `BigInt`.
#[inline]
pub(super) fn from_signed_bytes_be(digits: &[u8]) -> BigInt {
    // The sign lives in the most significant bit of the first byte.
    let sign = match digits.first() {
        Some(v) if *v > 0x7f => Sign::Minus,
        Some(_) => Sign::Plus,
        None => return BigInt::ZERO,
    };
    if sign == Sign::Minus {
        // two's-complement the content to retrieve the magnitude
        let mut digits = Vec::from(digits);
        twos_complement_be(&mut digits);
        BigInt::from_biguint(sign, BigUint::from_bytes_be(&digits))
    } else {
        BigInt::from_biguint(sign, BigUint::from_bytes_be(digits))
    }
}
/// Decodes a little-endian two's-complement byte string into a `BigInt`.
#[inline]
pub(super) fn from_signed_bytes_le(digits: &[u8]) -> BigInt {
    // The sign lives in the most significant bit of the last byte.
    let sign = match digits.last() {
        Some(v) if *v > 0x7f => Sign::Minus,
        Some(_) => Sign::Plus,
        None => return BigInt::ZERO,
    };
    if sign == Sign::Minus {
        // two's-complement the content to retrieve the magnitude
        let mut digits = Vec::from(digits);
        twos_complement_le(&mut digits);
        BigInt::from_biguint(sign, BigUint::from_bytes_le(&digits))
    } else {
        BigInt::from_biguint(sign, BigUint::from_bytes_le(digits))
    }
}
/// Encodes `x` as a minimal big-endian two's-complement byte string.
#[inline]
pub(super) fn to_signed_bytes_be(x: &BigInt) -> Vec<u8> {
    let mut bytes = x.data.to_bytes_be();
    let first_byte = bytes.first().cloned().unwrap_or(0);
    // If the magnitude occupies the sign bit, pad with one extra byte —
    // except for the exact value -2^(8k-1) (magnitude 0x80 00…00, sign
    // Minus), whose two's complement already fits in the unpadded width.
    if first_byte > 0x7f
        && !(first_byte == 0x80 && bytes.iter().skip(1).all(Zero::is_zero) && x.sign == Sign::Minus)
    {
        // msb used by magnitude, extend by 1 byte
        bytes.insert(0, 0);
    }
    if x.sign == Sign::Minus {
        // Negative values are emitted in two's complement.
        twos_complement_be(&mut bytes);
    }
    bytes
}
/// Encodes `x` as a minimal little-endian two's-complement byte string.
#[inline]
pub(super) fn to_signed_bytes_le(x: &BigInt) -> Vec<u8> {
    let mut bytes = x.data.to_bytes_le();
    let last_byte = bytes.last().cloned().unwrap_or(0);
    // Same sign-bit padding rule as the big-endian variant, applied to
    // the most significant (last) byte.
    if last_byte > 0x7f
        && !(last_byte == 0x80
            && bytes.iter().rev().skip(1).all(Zero::is_zero)
            && x.sign == Sign::Minus)
    {
        // msb used by magnitude, extend by 1 byte
        bytes.push(0);
    }
    if x.sign == Sign::Minus {
        // Negative values are emitted in two's complement.
        twos_complement_le(&mut bytes);
    }
    bytes
}
/// Perform in-place two's complement of the given binary representation,
/// in little-endian byte order.
#[inline]
fn twos_complement_le(digits: &mut [u8]) {
    twos_complement(digits)
}
/// Perform in-place two's complement of the given binary representation
/// in big-endian byte order (processed least-significant byte first by
/// iterating in reverse).
#[inline]
fn twos_complement_be(digits: &mut [u8]) {
    twos_complement(digits.iter_mut().rev())
}
/// Perform in-place two's complement of the given digit iterator
/// starting from the least significant byte.
///
/// Every byte is bitwise-inverted; the +1 increment propagates only
/// while the incremented byte wraps around to zero.
#[inline]
fn twos_complement<'a, I>(digits: I)
where
    I: IntoIterator<Item = &'a mut u8>,
{
    let mut carry = true;
    for d in digits {
        let flipped = !*d;
        *d = if carry {
            let bumped = flipped.wrapping_add(1);
            carry = bumped == 0;
            bumped
        } else {
            flipped
        };
    }
}

513
vendor/num-bigint/src/bigint/division.rs vendored Normal file
View File

@@ -0,0 +1,513 @@
use super::CheckedUnsignedAbs::{Negative, Positive};
use super::Sign::NoSign;
use super::{BigInt, UnsignedAbs};
use crate::{IsizePromotion, UsizePromotion};
use core::ops::{Div, DivAssign, Rem, RemAssign};
use num_integer::Integer;
use num_traits::{CheckedDiv, CheckedEuclid, Euclid, Signed, ToPrimitive, Zero};
forward_all_binop_to_ref_ref!(impl Div for BigInt, div);
impl Div<&BigInt> for &BigInt {
    type Output = BigInt;
    /// Truncated division; the remainder from `div_rem` is discarded.
    #[inline]
    fn div(self, other: &BigInt) -> BigInt {
        let (q, _) = self.div_rem(other);
        q
    }
}
impl DivAssign<&BigInt> for BigInt {
    /// In-place division, implemented via the ref-ref `Div`.
    #[inline]
    fn div_assign(&mut self, other: &BigInt) {
        *self = &*self / other;
    }
}
forward_val_assign!(impl DivAssign for BigInt, div_assign);
promote_all_scalars!(impl Div for BigInt, div);
promote_all_scalars_assign!(impl DivAssign for BigInt, div_assign);
forward_all_scalar_binop_to_val_val!(impl Div<u32> for BigInt, div);
forward_all_scalar_binop_to_val_val!(impl Div<u64> for BigInt, div);
forward_all_scalar_binop_to_val_val!(impl Div<u128> for BigInt, div);
// Division by an unsigned scalar divides the magnitude and keeps the
// sign; `from_biguint` (or the explicit check in the assign forms)
// normalizes the sign to NoSign when the quotient is zero.
impl Div<u32> for BigInt {
    type Output = BigInt;
    #[inline]
    fn div(self, other: u32) -> BigInt {
        BigInt::from_biguint(self.sign, self.data / other)
    }
}
impl DivAssign<u32> for BigInt {
    #[inline]
    fn div_assign(&mut self, other: u32) {
        self.data /= other;
        // A zero quotient must be normalized to NoSign.
        if self.data.is_zero() {
            self.sign = NoSign;
        }
    }
}
impl Div<BigInt> for u32 {
    type Output = BigInt;
    // unsigned / BigInt: the quotient is zero or follows the divisor's sign.
    #[inline]
    fn div(self, other: BigInt) -> BigInt {
        BigInt::from_biguint(other.sign, self / other.data)
    }
}
impl Div<u64> for BigInt {
    type Output = BigInt;
    #[inline]
    fn div(self, other: u64) -> BigInt {
        BigInt::from_biguint(self.sign, self.data / other)
    }
}
impl DivAssign<u64> for BigInt {
    #[inline]
    fn div_assign(&mut self, other: u64) {
        self.data /= other;
        if self.data.is_zero() {
            self.sign = NoSign;
        }
    }
}
impl Div<BigInt> for u64 {
    type Output = BigInt;
    #[inline]
    fn div(self, other: BigInt) -> BigInt {
        BigInt::from_biguint(other.sign, self / other.data)
    }
}
impl Div<u128> for BigInt {
    type Output = BigInt;
    #[inline]
    fn div(self, other: u128) -> BigInt {
        BigInt::from_biguint(self.sign, self.data / other)
    }
}
impl DivAssign<u128> for BigInt {
    #[inline]
    fn div_assign(&mut self, other: u128) {
        self.data /= other;
        if self.data.is_zero() {
            self.sign = NoSign;
        }
    }
}
impl Div<BigInt> for u128 {
    type Output = BigInt;
    #[inline]
    fn div(self, other: BigInt) -> BigInt {
        BigInt::from_biguint(other.sign, self / other.data)
    }
}
forward_all_scalar_binop_to_val_val!(impl Div<i32> for BigInt, div);
forward_all_scalar_binop_to_val_val!(impl Div<i64> for BigInt, div);
forward_all_scalar_binop_to_val_val!(impl Div<i128> for BigInt, div);
// Signed scalars are split into a sign and an unsigned magnitude with
// `checked_uabs`; `Negative(u)` carries the magnitude as unsigned, so
// even i*::MIN is handled without overflow.
impl Div<i32> for BigInt {
    type Output = BigInt;
    #[inline]
    fn div(self, other: i32) -> BigInt {
        match other.checked_uabs() {
            Positive(u) => self / u,
            Negative(u) => -self / u,
        }
    }
}
impl DivAssign<i32> for BigInt {
    #[inline]
    fn div_assign(&mut self, other: i32) {
        match other.checked_uabs() {
            Positive(u) => *self /= u,
            Negative(u) => {
                // Dividing by a negative divisor flips the sign.
                self.sign = -self.sign;
                *self /= u;
            }
        }
    }
}
impl Div<BigInt> for i32 {
    type Output = BigInt;
    #[inline]
    fn div(self, other: BigInt) -> BigInt {
        match self.checked_uabs() {
            Positive(u) => u / other,
            Negative(u) => u / -other,
        }
    }
}
impl Div<i64> for BigInt {
    type Output = BigInt;
    #[inline]
    fn div(self, other: i64) -> BigInt {
        match other.checked_uabs() {
            Positive(u) => self / u,
            Negative(u) => -self / u,
        }
    }
}
impl DivAssign<i64> for BigInt {
    #[inline]
    fn div_assign(&mut self, other: i64) {
        match other.checked_uabs() {
            Positive(u) => *self /= u,
            Negative(u) => {
                self.sign = -self.sign;
                *self /= u;
            }
        }
    }
}
impl Div<BigInt> for i64 {
    type Output = BigInt;
    #[inline]
    fn div(self, other: BigInt) -> BigInt {
        match self.checked_uabs() {
            Positive(u) => u / other,
            Negative(u) => u / -other,
        }
    }
}
impl Div<i128> for BigInt {
    type Output = BigInt;
    #[inline]
    fn div(self, other: i128) -> BigInt {
        match other.checked_uabs() {
            Positive(u) => self / u,
            Negative(u) => -self / u,
        }
    }
}
impl DivAssign<i128> for BigInt {
    #[inline]
    fn div_assign(&mut self, other: i128) {
        match other.checked_uabs() {
            Positive(u) => *self /= u,
            Negative(u) => {
                self.sign = -self.sign;
                *self /= u;
            }
        }
    }
}
impl Div<BigInt> for i128 {
    type Output = BigInt;
    #[inline]
    fn div(self, other: BigInt) -> BigInt {
        match self.checked_uabs() {
            Positive(u) => u / other,
            Negative(u) => u / -other,
        }
    }
}
forward_all_binop_to_ref_ref!(impl Rem for BigInt, rem);
impl Rem<&BigInt> for &BigInt {
    type Output = BigInt;
    /// Truncated remainder (sign follows the dividend).
    #[inline]
    fn rem(self, other: &BigInt) -> BigInt {
        // Fast path: a divisor that fits in 32 bits (positive, then
        // negative) uses the scalar remainder instead of full div_rem.
        if let Some(other) = other.to_u32() {
            self % other
        } else if let Some(other) = other.to_i32() {
            self % other
        } else {
            let (_, r) = self.div_rem(other);
            r
        }
    }
}
impl RemAssign<&BigInt> for BigInt {
    /// In-place remainder, implemented via the ref-ref `Rem`.
    #[inline]
    fn rem_assign(&mut self, other: &BigInt) {
        *self = &*self % other;
    }
}
forward_val_assign!(impl RemAssign for BigInt, rem_assign);
promote_all_scalars!(impl Rem for BigInt, rem);
promote_all_scalars_assign!(impl RemAssign for BigInt, rem_assign);
forward_all_scalar_binop_to_val_val!(impl Rem<u32> for BigInt, rem);
forward_all_scalar_binop_to_val_val!(impl Rem<u64> for BigInt, rem);
forward_all_scalar_binop_to_val_val!(impl Rem<u128> for BigInt, rem)

;
// Remainder by an unsigned scalar: truncated semantics, so the result
// keeps the dividend's sign (normalized to NoSign when zero).
impl Rem<u32> for BigInt {
    type Output = BigInt;
    #[inline]
    fn rem(self, other: u32) -> BigInt {
        BigInt::from_biguint(self.sign, self.data % other)
    }
}
impl RemAssign<u32> for BigInt {
    #[inline]
    fn rem_assign(&mut self, other: u32) {
        self.data %= other;
        // A zero remainder must be normalized to NoSign.
        if self.data.is_zero() {
            self.sign = NoSign;
        }
    }
}
impl Rem<BigInt> for u32 {
    type Output = BigInt;
    // unsigned % BigInt: the dividend is non-negative, so is the result.
    #[inline]
    fn rem(self, other: BigInt) -> BigInt {
        BigInt::from(self % other.data)
    }
}
impl Rem<u64> for BigInt {
    type Output = BigInt;
    #[inline]
    fn rem(self, other: u64) -> BigInt {
        BigInt::from_biguint(self.sign, self.data % other)
    }
}
impl RemAssign<u64> for BigInt {
    #[inline]
    fn rem_assign(&mut self, other: u64) {
        self.data %= other;
        if self.data.is_zero() {
            self.sign = NoSign;
        }
    }
}
impl Rem<BigInt> for u64 {
    type Output = BigInt;
    #[inline]
    fn rem(self, other: BigInt) -> BigInt {
        BigInt::from(self % other.data)
    }
}
impl Rem<u128> for BigInt {
    type Output = BigInt;
    #[inline]
    fn rem(self, other: u128) -> BigInt {
        BigInt::from_biguint(self.sign, self.data % other)
    }
}
impl RemAssign<u128> for BigInt {
    #[inline]
    fn rem_assign(&mut self, other: u128) {
        self.data %= other;
        if self.data.is_zero() {
            self.sign = NoSign;
        }
    }
}
impl Rem<BigInt> for u128 {
    type Output = BigInt;
    #[inline]
    fn rem(self, other: BigInt) -> BigInt {
        BigInt::from(self % other.data)
    }
}
forward_all_scalar_binop_to_val_val!(impl Rem<i32> for BigInt, rem);
forward_all_scalar_binop_to_val_val!(impl Rem<i64> for BigInt, rem);
forward_all_scalar_binop_to_val_val!(impl Rem<i128> for BigInt, rem);
// With truncated remainder only the dividend's sign matters, so a signed
// divisor can be replaced by its magnitude via `unsigned_abs` (which also
// handles i*::MIN without overflow).
impl Rem<i32> for BigInt {
    type Output = BigInt;
    #[inline]
    fn rem(self, other: i32) -> BigInt {
        self % other.unsigned_abs()
    }
}
impl RemAssign<i32> for BigInt {
    #[inline]
    fn rem_assign(&mut self, other: i32) {
        *self %= other.unsigned_abs();
    }
}
impl Rem<BigInt> for i32 {
    type Output = BigInt;
    // A negative scalar dividend yields a non-positive remainder.
    #[inline]
    fn rem(self, other: BigInt) -> BigInt {
        match self.checked_uabs() {
            Positive(u) => u % other,
            Negative(u) => -(u % other),
        }
    }
}
impl Rem<i64> for BigInt {
    type Output = BigInt;
    #[inline]
    fn rem(self, other: i64) -> BigInt {
        self % other.unsigned_abs()
    }
}
impl RemAssign<i64> for BigInt {
    #[inline]
    fn rem_assign(&mut self, other: i64) {
        *self %= other.unsigned_abs();
    }
}
impl Rem<BigInt> for i64 {
    type Output = BigInt;
    #[inline]
    fn rem(self, other: BigInt) -> BigInt {
        match self.checked_uabs() {
            Positive(u) => u % other,
            Negative(u) => -(u % other),
        }
    }
}
impl Rem<i128> for BigInt {
    type Output = BigInt;
    #[inline]
    fn rem(self, other: i128) -> BigInt {
        self % other.unsigned_abs()
    }
}
impl RemAssign<i128> for BigInt {
    #[inline]
    fn rem_assign(&mut self, other: i128) {
        *self %= other.unsigned_abs();
    }
}
impl Rem<BigInt> for i128 {
    type Output = BigInt;
    #[inline]
    fn rem(self, other: BigInt) -> BigInt {
        match self.checked_uabs() {
            Positive(u) => u % other,
            Negative(u) => -(u % other),
        }
    }
}
impl CheckedDiv for BigInt {
    /// Division that returns `None` instead of panicking on a zero divisor.
    #[inline]
    fn checked_div(&self, v: &BigInt) -> Option<BigInt> {
        if v.is_zero() {
            None
        } else {
            Some(self / v)
        }
    }
}
impl CheckedEuclid for BigInt {
    /// Euclidean division, returning `None` on a zero divisor.
    #[inline]
    fn checked_div_euclid(&self, v: &BigInt) -> Option<BigInt> {
        if v.is_zero() {
            return None;
        }
        Some(self.div_euclid(v))
    }
    /// Euclidean remainder, returning `None` on a zero divisor.
    #[inline]
    fn checked_rem_euclid(&self, v: &BigInt) -> Option<BigInt> {
        if v.is_zero() {
            return None;
        }
        Some(self.rem_euclid(v))
    }
    /// Combined Euclidean quotient and remainder, `None` on a zero divisor.
    ///
    /// Fix: this previously forwarded unconditionally, so a zero divisor
    /// panicked inside `div_rem_euclid` instead of returning `None` like
    /// the two methods above — violating the `Checked*` contract.
    fn checked_div_rem_euclid(&self, v: &Self) -> Option<(Self, Self)> {
        if v.is_zero() {
            return None;
        }
        Some(self.div_rem_euclid(v))
    }
}
impl Euclid for BigInt {
    /// Euclidean (floor-style) quotient: the remainder is always
    /// non-negative, so a negative truncated remainder requires
    /// adjusting the truncated quotient by one.
    #[inline]
    fn div_euclid(&self, v: &BigInt) -> BigInt {
        let (q, r) = self.div_rem(v);
        if r.is_negative() {
            if v.is_positive() {
                q - 1
            } else {
                q + 1
            }
        } else {
            q
        }
    }
    /// Euclidean remainder, always in `[0, |v|)`: a negative truncated
    /// remainder is shifted into range by adding `|v|`.
    #[inline]
    fn rem_euclid(&self, v: &BigInt) -> BigInt {
        let r = self % v;
        if r.is_negative() {
            if v.is_positive() {
                r + v
            } else {
                r - v
            }
        } else {
            r
        }
    }
    /// Quotient and remainder in one pass, applying both adjustments
    /// together so the identity `self == q * v + r` still holds.
    fn div_rem_euclid(&self, v: &Self) -> (Self, Self) {
        let (q, r) = self.div_rem(v);
        if r.is_negative() {
            if v.is_positive() {
                (q - 1, r + v)
            } else {
                (q + 1, r - v)
            }
        } else {
            (q, r)
        }
    }
}

View File

@@ -0,0 +1,217 @@
use super::CheckedUnsignedAbs::{Negative, Positive};
use super::Sign::{self, Minus, NoSign, Plus};
use super::{BigInt, UnsignedAbs};
use crate::{IsizePromotion, UsizePromotion};
use core::iter::Product;
use core::ops::{Mul, MulAssign};
use num_traits::{CheckedMul, One, Zero};
impl Mul<Sign> for Sign {
type Output = Sign;
#[inline]
fn mul(self, other: Sign) -> Sign {
match (self, other) {
(NoSign, _) | (_, NoSign) => NoSign,
(Plus, Plus) | (Minus, Minus) => Plus,
(Plus, Minus) | (Minus, Plus) => Minus,
}
}
}
// Generates `Mul` for every value/reference combination: the
// destructuring pattern binds the magnitude by value or by reference
// depending on the receiver, so one macro body covers all four impls.
macro_rules! impl_mul {
    ($(impl Mul<$Other:ty> for $Self:ty;)*) => {$(
        impl Mul<$Other> for $Self {
            type Output = BigInt;
            #[inline]
            fn mul(self, other: $Other) -> BigInt {
                // automatically match value/ref
                let BigInt { data: x, .. } = self;
                let BigInt { data: y, .. } = other;
                // from_biguint normalizes a zero product to NoSign.
                BigInt::from_biguint(self.sign * other.sign, x * y)
            }
        }
    )*}
}
impl_mul! {
    impl Mul<BigInt> for BigInt;
    impl Mul<BigInt> for &BigInt;
    impl Mul<&BigInt> for BigInt;
    impl Mul<&BigInt> for &BigInt;
}
// Generates `MulAssign` for value and reference right-hand sides.
macro_rules! impl_mul_assign {
    ($(impl MulAssign<$Other:ty> for BigInt;)*) => {$(
        impl MulAssign<$Other> for BigInt {
            #[inline]
            fn mul_assign(&mut self, other: $Other) {
                // automatically match value/ref
                let BigInt { data: y, .. } = other;
                self.data *= y;
                // A zero product must be normalized to NoSign; otherwise
                // the sign is the product of the operand signs.
                if self.data.is_zero() {
                    self.sign = NoSign;
                } else {
                    self.sign = self.sign * other.sign;
                }
            }
        }
    )*}
}
impl_mul_assign! {
    impl MulAssign<BigInt> for BigInt;
    impl MulAssign<&BigInt> for BigInt;
}
promote_all_scalars!(impl Mul for BigInt, mul);
promote_all_scalars_assign!(impl MulAssign for BigInt, mul_assign);
forward_all_scalar_binop_to_val_val_commutative!(impl Mul<u32> for BigInt, mul);
forward_all_scalar_binop_to_val_val_commutative!(impl Mul<u64> for BigInt, mul);
forward_all_scalar_binop_to_val_val_commutative!(impl Mul<u128> for BigInt, mul);
impl Mul<u32> for BigInt {
    type Output = BigInt;

    #[inline]
    fn mul(self, other: u32) -> BigInt {
        // An unsigned factor never flips the sign; `from_biguint` normalizes
        // the sign to `NoSign` when the product is zero.
        BigInt::from_biguint(self.sign, self.data * other)
    }
}
impl MulAssign<u32> for BigInt {
    #[inline]
    fn mul_assign(&mut self, other: u32) {
        self.data *= other;
        if self.data.is_zero() {
            // Restore the invariant that zero magnitude carries `NoSign`.
            self.sign = NoSign;
        }
    }
}
impl Mul<u64> for BigInt {
    type Output = BigInt;

    #[inline]
    fn mul(self, other: u64) -> BigInt {
        BigInt::from_biguint(self.sign, self.data * other)
    }
}
impl MulAssign<u64> for BigInt {
    #[inline]
    fn mul_assign(&mut self, other: u64) {
        self.data *= other;
        if self.data.is_zero() {
            self.sign = NoSign;
        }
    }
}
impl Mul<u128> for BigInt {
    type Output = BigInt;

    #[inline]
    fn mul(self, other: u128) -> BigInt {
        BigInt::from_biguint(self.sign, self.data * other)
    }
}
impl MulAssign<u128> for BigInt {
    #[inline]
    fn mul_assign(&mut self, other: u128) {
        self.data *= other;
        if self.data.is_zero() {
            self.sign = NoSign;
        }
    }
}
forward_all_scalar_binop_to_val_val_commutative!(impl Mul<i32> for BigInt, mul);
forward_all_scalar_binop_to_val_val_commutative!(impl Mul<i64> for BigInt, mul);
forward_all_scalar_binop_to_val_val_commutative!(impl Mul<i128> for BigInt, mul);
impl Mul<i32> for BigInt {
    type Output = BigInt;

    #[inline]
    fn mul(self, other: i32) -> BigInt {
        // Split the signed scalar into sign and magnitude; `checked_uabs`
        // also handles `MIN`, whose absolute value overflows the signed type.
        match other.checked_uabs() {
            Positive(u) => self * u,
            Negative(u) => -self * u,
        }
    }
}
impl MulAssign<i32> for BigInt {
    #[inline]
    fn mul_assign(&mut self, other: i32) {
        match other.checked_uabs() {
            Positive(u) => *self *= u,
            Negative(u) => {
                // `u` is the magnitude of a negative scalar, hence non-zero,
                // so flipping the sign first cannot break the zero invariant.
                self.sign = -self.sign;
                self.data *= u;
            }
        }
    }
}
impl Mul<i64> for BigInt {
    type Output = BigInt;

    #[inline]
    fn mul(self, other: i64) -> BigInt {
        match other.checked_uabs() {
            Positive(u) => self * u,
            Negative(u) => -self * u,
        }
    }
}
impl MulAssign<i64> for BigInt {
    #[inline]
    fn mul_assign(&mut self, other: i64) {
        match other.checked_uabs() {
            Positive(u) => *self *= u,
            Negative(u) => {
                self.sign = -self.sign;
                self.data *= u;
            }
        }
    }
}
impl Mul<i128> for BigInt {
    type Output = BigInt;

    #[inline]
    fn mul(self, other: i128) -> BigInt {
        match other.checked_uabs() {
            Positive(u) => self * u,
            Negative(u) => -self * u,
        }
    }
}
impl MulAssign<i128> for BigInt {
    #[inline]
    fn mul_assign(&mut self, other: i128) {
        match other.checked_uabs() {
            Positive(u) => *self *= u,
            Negative(u) => {
                self.sign = -self.sign;
                self.data *= u;
            }
        }
    }
}
impl CheckedMul for BigInt {
#[inline]
fn checked_mul(&self, v: &BigInt) -> Option<BigInt> {
Some(self.mul(v))
}
}
impl_product_iter_type!(BigInt);

94
vendor/num-bigint/src/bigint/power.rs vendored Normal file
View File

@@ -0,0 +1,94 @@
use super::BigInt;
use super::Sign::{self, Minus, Plus};
use crate::BigUint;
use num_integer::Integer;
use num_traits::{Pow, Signed, Zero};
/// Help function for pow
///
/// Computes the effect of the exponent on the sign.
#[inline]
fn powsign<T: Integer>(sign: Sign, other: &T) -> Sign {
    if other.is_zero() {
        // x^0 == 1 is positive regardless of the base's sign.
        return Plus;
    }
    match sign {
        // A negative base turns positive when raised to an even power.
        Minus if other.is_even() => -sign,
        _ => sign,
    }
}
// Implements `Pow<$T>` for all four value/reference combinations of base
// and exponent, forwarding the magnitude work to `BigUint::pow` and the
// sign to `powsign`.
macro_rules! pow_impl {
    ($T:ty) => {
        impl Pow<$T> for BigInt {
            type Output = BigInt;

            #[inline]
            fn pow(self, rhs: $T) -> BigInt {
                BigInt::from_biguint(powsign(self.sign, &rhs), self.data.pow(rhs))
            }
        }
        impl Pow<&$T> for BigInt {
            type Output = BigInt;

            #[inline]
            fn pow(self, rhs: &$T) -> BigInt {
                BigInt::from_biguint(powsign(self.sign, rhs), self.data.pow(rhs))
            }
        }
        impl Pow<$T> for &BigInt {
            type Output = BigInt;

            #[inline]
            fn pow(self, rhs: $T) -> BigInt {
                // Borrowed base: compute on `&self.data` so the magnitude is
                // not cloned until `BigUint`'s own impls decide to.
                BigInt::from_biguint(powsign(self.sign, &rhs), Pow::pow(&self.data, rhs))
            }
        }
        impl Pow<&$T> for &BigInt {
            type Output = BigInt;

            #[inline]
            fn pow(self, rhs: &$T) -> BigInt {
                BigInt::from_biguint(powsign(self.sign, rhs), Pow::pow(&self.data, rhs))
            }
        }
    };
}
pow_impl!(u8);
pow_impl!(u16);
pow_impl!(u32);
pow_impl!(u64);
pow_impl!(usize);
pow_impl!(u128);
pow_impl!(BigUint);
/// Modular exponentiation: `x^exponent (mod modulus)`.
///
/// # Panics
///
/// Panics if `exponent` is negative or `modulus` is zero.
pub(super) fn modpow(x: &BigInt, exponent: &BigInt, modulus: &BigInt) -> BigInt {
    assert!(
        !exponent.is_negative(),
        "negative exponentiation is not supported!"
    );
    assert!(
        !modulus.is_zero(),
        "attempt to calculate with zero modulus!"
    );

    // Work on magnitudes first: |x|^e mod |m|.
    let result = x.data.modpow(&exponent.data, &modulus.data);
    if result.is_zero() {
        return BigInt::ZERO;
    }

    // The sign of the result follows the modulus, like `mod_floor`.
    // A negative base with an odd exponent negates the true value; when that
    // disagrees with the modulus' sign, fold the magnitude back into range
    // with `|m| - result`.
    let (sign, mag) = match (x.is_negative() && exponent.is_odd(), modulus.is_negative()) {
        (false, false) => (Plus, result),
        (true, false) => (Plus, &modulus.data - result),
        (false, true) => (Minus, &modulus.data - result),
        (true, true) => (Minus, result),
    };
    BigInt::from_biguint(sign, mag)
}

61
vendor/num-bigint/src/bigint/serde.rs vendored Normal file
View File

@@ -0,0 +1,61 @@
#![cfg(feature = "serde")]
#![cfg_attr(docsrs, doc(cfg(feature = "serde")))]
use super::{BigInt, Sign};
use serde::de::{Error, Unexpected};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
impl Serialize for Sign {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        // Note: do not change the serialization format, or it may break
        // forward and backward compatibility of serialized data!
        // The wire representation is a single i8 in {-1, 0, 1}.
        let repr: i8 = match *self {
            Sign::Minus => -1,
            Sign::NoSign => 0,
            Sign::Plus => 1,
        };
        repr.serialize(serializer)
    }
}
impl<'de> Deserialize<'de> for Sign {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        // The wire representation is a single i8 in {-1, 0, 1}; anything
        // else is rejected as invalid input.
        match i8::deserialize(deserializer)? {
            -1 => Ok(Sign::Minus),
            0 => Ok(Sign::NoSign),
            1 => Ok(Sign::Plus),
            invalid => Err(D::Error::invalid_value(
                Unexpected::Signed(invalid.into()),
                &"a sign of -1, 0, or 1",
            )),
        }
    }
}
impl Serialize for BigInt {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        // Note: do not change the serialization format, or it may break
        // forward and backward compatibility of serialized data!
        // A `BigInt` serializes as the `(sign, magnitude)` pair.
        (self.sign, &self.data).serialize(serializer)
    }
}
impl<'de> Deserialize<'de> for BigInt {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        // `from_biguint` normalizes, so a non-canonical sign/zero pairing in
        // the input still yields a canonical value.
        let (sign, data) = Deserialize::deserialize(deserializer)?;
        Ok(BigInt::from_biguint(sign, data))
    }
}

107
vendor/num-bigint/src/bigint/shift.rs vendored Normal file
View File

@@ -0,0 +1,107 @@
use super::BigInt;
use super::Sign::NoSign;
use core::ops::{Shl, ShlAssign, Shr, ShrAssign};
use num_traits::{PrimInt, Signed, Zero};
// Generates `Shl`/`Shr`/`ShlAssign`/`ShrAssign` impls for `BigInt` against
// each listed primitive shift type. The `@ref` arm layers the by-reference
// RHS variants on top of the by-value ones.
macro_rules! impl_shift {
    (@ref $Shx:ident :: $shx:ident, $ShxAssign:ident :: $shx_assign:ident, $rhs:ty) => {
        impl $Shx<&$rhs> for BigInt {
            type Output = BigInt;

            #[inline]
            fn $shx(self, rhs: &$rhs) -> BigInt {
                $Shx::$shx(self, *rhs)
            }
        }
        impl $Shx<&$rhs> for &BigInt {
            type Output = BigInt;

            #[inline]
            fn $shx(self, rhs: &$rhs) -> BigInt {
                $Shx::$shx(self, *rhs)
            }
        }
        impl $ShxAssign<&$rhs> for BigInt {
            #[inline]
            fn $shx_assign(&mut self, rhs: &$rhs) {
                $ShxAssign::$shx_assign(self, *rhs);
            }
        }
    };
    ($($rhs:ty),+) => {$(
        impl Shl<$rhs> for BigInt {
            type Output = BigInt;

            #[inline]
            fn shl(self, rhs: $rhs) -> BigInt {
                BigInt::from_biguint(self.sign, self.data << rhs)
            }
        }
        impl Shl<$rhs> for &BigInt {
            type Output = BigInt;

            #[inline]
            fn shl(self, rhs: $rhs) -> BigInt {
                BigInt::from_biguint(self.sign, &self.data << rhs)
            }
        }
        impl ShlAssign<$rhs> for BigInt {
            #[inline]
            fn shl_assign(&mut self, rhs: $rhs) {
                // A left shift cannot zero a non-zero magnitude, so the sign
                // invariant is unaffected.
                self.data <<= rhs
            }
        }
        impl_shift! { @ref Shl::shl, ShlAssign::shl_assign, $rhs }
        impl Shr<$rhs> for BigInt {
            type Output = BigInt;

            #[inline]
            fn shr(self, rhs: $rhs) -> BigInt {
                // Right shift rounds toward negative infinity: for negative
                // values that discarded set bits, bump the magnitude by one.
                let round_down = shr_round_down(&self, rhs);
                let data = self.data >> rhs;
                let data = if round_down { data + 1u8 } else { data };
                BigInt::from_biguint(self.sign, data)
            }
        }
        impl Shr<$rhs> for &BigInt {
            type Output = BigInt;

            #[inline]
            fn shr(self, rhs: $rhs) -> BigInt {
                let round_down = shr_round_down(self, rhs);
                let data = &self.data >> rhs;
                let data = if round_down { data + 1u8 } else { data };
                BigInt::from_biguint(self.sign, data)
            }
        }
        impl ShrAssign<$rhs> for BigInt {
            #[inline]
            fn shr_assign(&mut self, rhs: $rhs) {
                let round_down = shr_round_down(self, rhs);
                self.data >>= rhs;
                if round_down {
                    // The magnitude ends up non-zero, so the sign can stay.
                    self.data += 1u8;
                } else if self.data.is_zero() {
                    self.sign = NoSign;
                }
            }
        }
        impl_shift! { @ref Shr::shr, ShrAssign::shr_assign, $rhs }
    )*};
}
impl_shift! { u8, u16, u32, u64, u128, usize }
impl_shift! { i8, i16, i32, i64, i128, isize }
// Negative values need a rounding adjustment if there are any ones in the
// bits that are getting shifted out.
fn shr_round_down<T: PrimInt>(i: &BigInt, shift: T) -> bool {
    if !i.is_negative() {
        // Non-negative values truncate toward zero, which already matches
        // rounding toward negative infinity.
        return false;
    }
    let zeros = i.trailing_zeros().expect("negative values are non-zero");
    // A shift that doesn't fit in u64 certainly exceeds the number of
    // trailing zeros, so set bits would be discarded.
    shift > T::zero() && shift.to_u64().map_or(true, |s| zeros < s)
}

View File

@@ -0,0 +1,300 @@
use super::CheckedUnsignedAbs::{Negative, Positive};
use super::Sign::{Minus, NoSign, Plus};
use super::{BigInt, UnsignedAbs};
use crate::{IsizePromotion, UsizePromotion};
use core::cmp::Ordering::{Equal, Greater, Less};
use core::mem;
use core::ops::{Sub, SubAssign};
use num_traits::CheckedSub;
// We want to forward to BigUint::sub, but it's not clear how that will go until
// we compare both sign and magnitude. So we duplicate this body for every
// val/ref combination, deferring that decision to BigUint's own forwarding.
//
// `$a_owned`/`$b_owned` are used when a whole operand becomes the result;
// `$a_data`/`$b_data` are the magnitudes in whatever form (value or
// reference) the calling impl has available.
macro_rules! bigint_sub {
    ($a:expr, $a_owned:expr, $a_data:expr, $b:expr, $b_owned:expr, $b_data:expr) => {
        match ($a.sign, $b.sign) {
            // x - 0 = x and 0 - y = -y need no magnitude arithmetic.
            (_, NoSign) => $a_owned,
            (NoSign, _) => -$b_owned,
            // opposite signs => keep the sign of the left with the sum of magnitudes
            (Plus, Minus) | (Minus, Plus) => BigInt::from_biguint($a.sign, $a_data + $b_data),
            // same sign => keep or toggle the sign of the left with the difference of magnitudes
            (Plus, Plus) | (Minus, Minus) => match $a.data.cmp(&$b.data) {
                Less => BigInt::from_biguint(-$a.sign, $b_data - $a_data),
                Greater => BigInt::from_biguint($a.sign, $a_data - $b_data),
                Equal => BigInt::ZERO,
            },
        }
    };
}
// The four value/reference combinations all expand `bigint_sub!`, passing
// each operand in whichever owned/borrowed form this impl has available.
impl Sub<&BigInt> for &BigInt {
    type Output = BigInt;

    #[inline]
    fn sub(self, other: &BigInt) -> BigInt {
        bigint_sub!(
            self,
            self.clone(),
            &self.data,
            other,
            other.clone(),
            &other.data
        )
    }
}
impl Sub<BigInt> for &BigInt {
    type Output = BigInt;

    #[inline]
    fn sub(self, other: BigInt) -> BigInt {
        bigint_sub!(self, self.clone(), &self.data, other, other, other.data)
    }
}
impl Sub<&BigInt> for BigInt {
    type Output = BigInt;

    #[inline]
    fn sub(self, other: &BigInt) -> BigInt {
        bigint_sub!(self, self, self.data, other, other.clone(), &other.data)
    }
}
impl Sub<BigInt> for BigInt {
    type Output = BigInt;

    #[inline]
    fn sub(self, other: BigInt) -> BigInt {
        bigint_sub!(self, self, self.data, other, other, other.data)
    }
}
impl SubAssign<&BigInt> for BigInt {
    #[inline]
    fn sub_assign(&mut self, other: &BigInt) {
        // Take ownership of `self` (leaving zero behind) so the by-value
        // subtraction can reuse its buffers.
        let n = mem::replace(self, Self::ZERO);
        *self = n - other;
    }
}
forward_val_assign!(impl SubAssign for BigInt, sub_assign);

// Scalar support: promotion widens small primitives; forwarding reduces
// every value/ref combination to the by-value impls below.
promote_all_scalars!(impl Sub for BigInt, sub);
promote_all_scalars_assign!(impl SubAssign for BigInt, sub_assign);
forward_all_scalar_binop_to_val_val!(impl Sub<u32> for BigInt, sub);
forward_all_scalar_binop_to_val_val!(impl Sub<u64> for BigInt, sub);
forward_all_scalar_binop_to_val_val!(impl Sub<u128> for BigInt, sub);
impl Sub<u32> for BigInt {
    type Output = BigInt;

    #[inline]
    fn sub(self, other: u32) -> BigInt {
        // Dispatch on the sign of `self`:
        //   0 - y   = -y
        //  -x - y   = -(x + y)
        //   x - y   = sign depends on comparing magnitudes.
        match self.sign {
            NoSign => -BigInt::from(other),
            Minus => -BigInt::from(self.data + other),
            Plus => match self.data.cmp(&From::from(other)) {
                Equal => Self::ZERO,
                Greater => BigInt::from(self.data - other),
                Less => -BigInt::from(other - self.data),
            },
        }
    }
}
impl SubAssign<u32> for BigInt {
    #[inline]
    fn sub_assign(&mut self, other: u32) {
        let n = mem::replace(self, Self::ZERO);
        *self = n - other;
    }
}
// Scalar-minus-BigInt is derived from BigInt-minus-scalar by negation.
impl Sub<BigInt> for u32 {
    type Output = BigInt;

    #[inline]
    fn sub(self, other: BigInt) -> BigInt {
        -(other - self)
    }
}
impl Sub<BigInt> for u64 {
    type Output = BigInt;

    #[inline]
    fn sub(self, other: BigInt) -> BigInt {
        -(other - self)
    }
}
impl Sub<BigInt> for u128 {
    type Output = BigInt;

    #[inline]
    fn sub(self, other: BigInt) -> BigInt {
        -(other - self)
    }
}
// Same scheme as `Sub<u32>` above for the wider unsigned scalars.
impl Sub<u64> for BigInt {
    type Output = BigInt;

    #[inline]
    fn sub(self, other: u64) -> BigInt {
        match self.sign {
            NoSign => -BigInt::from(other),
            Minus => -BigInt::from(self.data + other),
            Plus => match self.data.cmp(&From::from(other)) {
                Equal => Self::ZERO,
                Greater => BigInt::from(self.data - other),
                Less => -BigInt::from(other - self.data),
            },
        }
    }
}
impl SubAssign<u64> for BigInt {
    #[inline]
    fn sub_assign(&mut self, other: u64) {
        let n = mem::replace(self, Self::ZERO);
        *self = n - other;
    }
}
impl Sub<u128> for BigInt {
    type Output = BigInt;

    #[inline]
    fn sub(self, other: u128) -> BigInt {
        match self.sign {
            NoSign => -BigInt::from(other),
            Minus => -BigInt::from(self.data + other),
            Plus => match self.data.cmp(&From::from(other)) {
                Equal => Self::ZERO,
                Greater => BigInt::from(self.data - other),
                Less => -BigInt::from(other - self.data),
            },
        }
    }
}
impl SubAssign<u128> for BigInt {
    #[inline]
    fn sub_assign(&mut self, other: u128) {
        let n = mem::replace(self, Self::ZERO);
        *self = n - other;
    }
}
forward_all_scalar_binop_to_val_val!(impl Sub<i32> for BigInt, sub);
forward_all_scalar_binop_to_val_val!(impl Sub<i64> for BigInt, sub);
forward_all_scalar_binop_to_val_val!(impl Sub<i128> for BigInt, sub);
impl Sub<i32> for BigInt {
    type Output = BigInt;

    #[inline]
    fn sub(self, other: i32) -> BigInt {
        // Split the signed scalar into sign and magnitude; subtracting a
        // negative value is an addition. `checked_uabs` handles `MIN` too.
        match other.checked_uabs() {
            Positive(u) => self - u,
            Negative(u) => self + u,
        }
    }
}
impl SubAssign<i32> for BigInt {
    #[inline]
    fn sub_assign(&mut self, other: i32) {
        match other.checked_uabs() {
            Positive(u) => *self -= u,
            Negative(u) => *self += u,
        }
    }
}
impl Sub<BigInt> for i32 {
    type Output = BigInt;

    #[inline]
    fn sub(self, other: BigInt) -> BigInt {
        // u - other when self >= 0; -(other + u) when self < 0.
        match self.checked_uabs() {
            Positive(u) => u - other,
            Negative(u) => -other - u,
        }
    }
}
impl Sub<i64> for BigInt {
    type Output = BigInt;

    #[inline]
    fn sub(self, other: i64) -> BigInt {
        match other.checked_uabs() {
            Positive(u) => self - u,
            Negative(u) => self + u,
        }
    }
}
impl SubAssign<i64> for BigInt {
    #[inline]
    fn sub_assign(&mut self, other: i64) {
        match other.checked_uabs() {
            Positive(u) => *self -= u,
            Negative(u) => *self += u,
        }
    }
}
impl Sub<BigInt> for i64 {
    type Output = BigInt;

    #[inline]
    fn sub(self, other: BigInt) -> BigInt {
        match self.checked_uabs() {
            Positive(u) => u - other,
            Negative(u) => -other - u,
        }
    }
}
impl Sub<i128> for BigInt {
    type Output = BigInt;

    #[inline]
    fn sub(self, other: i128) -> BigInt {
        match other.checked_uabs() {
            Positive(u) => self - u,
            Negative(u) => self + u,
        }
    }
}
impl SubAssign<i128> for BigInt {
    #[inline]
    fn sub_assign(&mut self, other: i128) {
        match other.checked_uabs() {
            Positive(u) => *self -= u,
            Negative(u) => *self += u,
        }
    }
}
impl Sub<BigInt> for i128 {
    type Output = BigInt;

    #[inline]
    fn sub(self, other: BigInt) -> BigInt {
        match self.checked_uabs() {
            Positive(u) => u - other,
            Negative(u) => -other - u,
        }
    }
}
impl CheckedSub for BigInt {
#[inline]
fn checked_sub(&self, v: &BigInt) -> Option<BigInt> {
Some(self.sub(v))
}
}

285
vendor/num-bigint/src/bigrand.rs vendored Normal file
View File

@@ -0,0 +1,285 @@
//! Randomization of big integers
#![cfg(feature = "rand")]
#![cfg_attr(docsrs, doc(cfg(feature = "rand")))]
use rand::distributions::uniform::{SampleBorrow, SampleUniform, UniformSampler};
use rand::prelude::*;
use crate::BigInt;
use crate::BigUint;
use crate::Sign::*;
use crate::biguint::biguint_from_vec;
use num_integer::Integer;
use num_traits::{ToPrimitive, Zero};
/// A trait for sampling random big integers.
///
/// The `rand` feature must be enabled to use this. See crate-level documentation for details.
pub trait RandBigInt {
    /// Generate a random [`BigUint`] of the given bit size.
    fn gen_biguint(&mut self, bit_size: u64) -> BigUint;
    /// Generate a random [`BigInt`] of the given bit size.
    fn gen_bigint(&mut self, bit_size: u64) -> BigInt;
    /// Generate a random [`BigUint`] less than the given bound. Fails
    /// when the bound is zero.
    fn gen_biguint_below(&mut self, bound: &BigUint) -> BigUint;
    /// Generate a random [`BigUint`] within the given range. The lower
    /// bound is inclusive; the upper bound is exclusive. Fails when
    /// the upper bound is not greater than the lower bound.
    fn gen_biguint_range(&mut self, lbound: &BigUint, ubound: &BigUint) -> BigUint;
    /// Generate a random [`BigInt`] within the given range. The lower
    /// bound is inclusive; the upper bound is exclusive. Fails when
    /// the upper bound is not greater than the lower bound.
    fn gen_bigint_range(&mut self, lbound: &BigInt, ubound: &BigInt) -> BigInt;
}
/// Fill `data` with random bits, keeping only `rem` bits (when non-zero)
/// in the most significant digit.
fn gen_bits<R: Rng + ?Sized>(rng: &mut R, data: &mut [u32], rem: u64) {
    // One bulk `fill` is faster than generating each `u32` separately.
    rng.fill(data);
    if rem > 0 {
        // Drop the excess high bits of the top digit.
        let top = data.len() - 1;
        data[top] >>= 32 - rem;
    }
}
impl<R: Rng + ?Sized> RandBigInt for R {
    // `cfg_digit!` selects the first body for 32-bit digits and the second
    // for 64-bit digits; both generate value-stable bits as u32 chunks.
    cfg_digit!(
        fn gen_biguint(&mut self, bit_size: u64) -> BigUint {
            let (digits, rem) = bit_size.div_rem(&32);
            let len = (digits + (rem > 0) as u64)
                .to_usize()
                .expect("capacity overflow");
            let mut data = vec![0u32; len];
            gen_bits(self, &mut data, rem);
            biguint_from_vec(data)
        }
        fn gen_biguint(&mut self, bit_size: u64) -> BigUint {
            use core::slice;
            let (digits, rem) = bit_size.div_rem(&32);
            let len = (digits + (rem > 0) as u64)
                .to_usize()
                .expect("capacity overflow");
            let native_digits = Integer::div_ceil(&bit_size, &64);
            let native_len = native_digits.to_usize().expect("capacity overflow");
            let mut data = vec![0u64; native_len];
            unsafe {
                // Generate bits in a `&mut [u32]` slice for value stability
                // (same stream of u32s as the 32-bit-digit build). The u32
                // view covers at most the u64 buffer, per the assert below.
                let ptr = data.as_mut_ptr() as *mut u32;
                debug_assert!(native_len * 2 >= len);
                let data = slice::from_raw_parts_mut(ptr, len);
                gen_bits(self, data, rem);
            }
            #[cfg(target_endian = "big")]
            for digit in &mut data {
                // swap u32 digits into u64 endianness
                *digit = (*digit << 32) | (*digit >> 32);
            }
            biguint_from_vec(data)
        }
    );
    fn gen_bigint(&mut self, bit_size: u64) -> BigInt {
        loop {
            // Generate a random BigUint...
            let biguint = self.gen_biguint(bit_size);
            // ...and then randomly assign it a Sign...
            let sign = if biguint.is_zero() {
                // ...except that if the BigUint is zero, we need to try
                // again with probability 0.5. This is because otherwise,
                // the probability of generating a zero BigInt would be
                // double that of any other number.
                if self.gen() {
                    continue;
                } else {
                    NoSign
                }
            } else if self.gen() {
                Plus
            } else {
                Minus
            };
            return BigInt::from_biguint(sign, biguint);
        }
    }
    fn gen_biguint_below(&mut self, bound: &BigUint) -> BigUint {
        assert!(!bound.is_zero());
        let bits = bound.bits();
        // Rejection sampling keeps the distribution uniform.
        loop {
            let n = self.gen_biguint(bits);
            if n < *bound {
                return n;
            }
        }
    }
    fn gen_biguint_range(&mut self, lbound: &BigUint, ubound: &BigUint) -> BigUint {
        assert!(*lbound < *ubound);
        if lbound.is_zero() {
            self.gen_biguint_below(ubound)
        } else {
            // Sample an offset below the range width, then shift it up.
            lbound + self.gen_biguint_below(&(ubound - lbound))
        }
    }
    fn gen_bigint_range(&mut self, lbound: &BigInt, ubound: &BigInt) -> BigInt {
        assert!(*lbound < *ubound);
        // The shortcut branches avoid computing `ubound - lbound` when one
        // end of the range is zero.
        if lbound.is_zero() {
            BigInt::from(self.gen_biguint_below(ubound.magnitude()))
        } else if ubound.is_zero() {
            lbound + BigInt::from(self.gen_biguint_below(lbound.magnitude()))
        } else {
            let delta = ubound - lbound;
            lbound + BigInt::from(self.gen_biguint_below(delta.magnitude()))
        }
    }
}
/// The back-end implementing rand's [`UniformSampler`] for [`BigUint`].
#[derive(Clone, Debug)]
pub struct UniformBigUint {
    // Inclusive lower bound of the sampled range.
    base: BigUint,
    // Width of the range; samples are `base + x` with `x < len`.
    len: BigUint,
}
impl UniformSampler for UniformBigUint {
    type X = BigUint;

    /// Builds a sampler for the half-open range `[low, high)`.
    #[inline]
    fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
    where
        B1: SampleBorrow<Self::X> + Sized,
        B2: SampleBorrow<Self::X> + Sized,
    {
        let low = low_b.borrow();
        let high = high_b.borrow();
        assert!(low < high);
        UniformBigUint {
            len: high - low,
            base: low.clone(),
        }
    }

    /// Builds a sampler for the closed range `[low, high]` by widening the
    /// exclusive upper bound by one.
    #[inline]
    fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
    where
        B1: SampleBorrow<Self::X> + Sized,
        B2: SampleBorrow<Self::X> + Sized,
    {
        let low = low_b.borrow();
        let high = high_b.borrow();
        assert!(low <= high);
        Self::new(low, high + 1u32)
    }

    #[inline]
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
        &self.base + rng.gen_biguint_below(&self.len)
    }

    /// One-shot sampling without constructing a sampler.
    #[inline]
    fn sample_single<R: Rng + ?Sized, B1, B2>(low: B1, high: B2, rng: &mut R) -> Self::X
    where
        B1: SampleBorrow<Self::X> + Sized,
        B2: SampleBorrow<Self::X> + Sized,
    {
        rng.gen_biguint_range(low.borrow(), high.borrow())
    }
}
impl SampleUniform for BigUint {
    type Sampler = UniformBigUint;
}
/// The back-end implementing rand's [`UniformSampler`] for [`BigInt`].
#[derive(Clone, Debug)]
pub struct UniformBigInt {
    // Inclusive lower bound of the sampled range (may be negative).
    base: BigInt,
    // Width of the range as an unsigned magnitude.
    len: BigUint,
}
impl UniformSampler for UniformBigInt {
    type X = BigInt;

    /// Builds a sampler for the half-open range `[low, high)`.
    #[inline]
    fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
    where
        B1: SampleBorrow<Self::X> + Sized,
        B2: SampleBorrow<Self::X> + Sized,
    {
        let low = low_b.borrow();
        let high = high_b.borrow();
        assert!(low < high);
        UniformBigInt {
            // `high - low` is positive here, so its magnitude
            // (`into_parts().1`) is the width of the range.
            len: (high - low).into_parts().1,
            base: low.clone(),
        }
    }

    /// Builds a sampler for the closed range `[low, high]` by widening the
    /// exclusive upper bound by one.
    #[inline]
    fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
    where
        B1: SampleBorrow<Self::X> + Sized,
        B2: SampleBorrow<Self::X> + Sized,
    {
        let low = low_b.borrow();
        let high = high_b.borrow();
        assert!(low <= high);
        Self::new(low, high + 1u32)
    }

    #[inline]
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
        &self.base + BigInt::from(rng.gen_biguint_below(&self.len))
    }

    /// One-shot sampling without constructing a sampler.
    #[inline]
    fn sample_single<R: Rng + ?Sized, B1, B2>(low: B1, high: B2, rng: &mut R) -> Self::X
    where
        B1: SampleBorrow<Self::X> + Sized,
        B2: SampleBorrow<Self::X> + Sized,
    {
        rng.gen_bigint_range(low.borrow(), high.borrow())
    }
}
impl SampleUniform for BigInt {
    type Sampler = UniformBigInt;
}
/// A random distribution for [`BigUint`] and [`BigInt`] values of a particular bit size.
///
/// The `rand` feature must be enabled to use this. See crate-level documentation for details.
#[derive(Clone, Copy, Debug)]
pub struct RandomBits {
    // Number of random bits in each sampled value.
    bits: u64,
}
impl RandomBits {
    /// Creates a distribution sampling values of `bits` random bits.
    #[inline]
    pub fn new(bits: u64) -> RandomBits {
        RandomBits { bits }
    }
}
impl Distribution<BigUint> for RandomBits {
    // Forwards to `RandBigInt::gen_biguint` with the configured bit count.
    #[inline]
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> BigUint {
        rng.gen_biguint(self.bits)
    }
}
impl Distribution<BigInt> for RandomBits {
    // Forwards to `RandBigInt::gen_bigint` with the configured bit count.
    #[inline]
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> BigInt {
        rng.gen_bigint(self.bits)
    }
}

1206
vendor/num-bigint/src/biguint.rs vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,256 @@
use super::{BigUint, IntDigits};
use crate::big_digit::{self, BigDigit};
use crate::UsizePromotion;
use core::iter::Sum;
use core::ops::{Add, AddAssign};
use num_traits::CheckedAdd;
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64 as arch;
#[cfg(target_arch = "x86")]
use core::arch::x86 as arch;
// Add with carry: one digit plus one digit plus an incoming carry, writing
// the sum digit through `out` and returning the outgoing carry (0 or 1).
#[cfg(target_arch = "x86_64")]
cfg_64!(
    #[inline]
    fn adc(carry: u8, a: u64, b: u64, out: &mut u64) -> u8 {
        // Safety: There are absolutely no safety concerns with calling `_addcarry_u64`.
        // It's just unsafe for API consistency with other intrinsics.
        unsafe { arch::_addcarry_u64(carry, a, b, out) }
    }
);
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
cfg_32!(
    #[inline]
    fn adc(carry: u8, a: u32, b: u32, out: &mut u32) -> u8 {
        // Safety: There are absolutely no safety concerns with calling `_addcarry_u32`.
        // It's just unsafe for API consistency with other intrinsics.
        unsafe { arch::_addcarry_u32(carry, a, b, out) }
    }
);
// fallback for environments where we don't have an addcarry intrinsic
// (copied from the standard library's `carrying_add`)
#[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
#[inline]
fn adc(carry: u8, lhs: BigDigit, rhs: BigDigit, out: &mut BigDigit) -> u8 {
    // At most one of the two additions can overflow, so the outgoing carry
    // is simply the OR of the two overflow flags.
    let (a, b) = lhs.overflowing_add(rhs);
    let (c, d) = a.overflowing_add(carry as BigDigit);
    *out = c;
    u8::from(b || d)
}
/// Two argument addition of raw slices, `a += b`, returning the carry.
///
/// This is used when the data `Vec` might need to resize to push a non-zero carry, so we perform
/// the addition first hoping that it will fit.
///
/// The caller _must_ ensure that `a` is at least as long as `b`.
#[inline]
pub(super) fn __add2(a: &mut [BigDigit], b: &[BigDigit]) -> BigDigit {
    debug_assert!(a.len() >= b.len());

    let mut carry = 0;
    // Add the overlapping low digits pairwise, chaining the carry.
    let (a_lo, a_hi) = a.split_at_mut(b.len());
    for (a, b) in a_lo.iter_mut().zip(b) {
        carry = adc(carry, *a, *b, a);
    }
    // Propagate any remaining carry through `a`'s high digits, stopping as
    // soon as it is absorbed.
    if carry != 0 {
        for a in a_hi {
            carry = adc(carry, *a, 0, a);
            if carry == 0 {
                break;
            }
        }
    }
    carry as BigDigit
}
/// Two argument addition of raw slices:
/// a += b
///
/// The caller _must_ ensure that a is big enough to store the result - typically this means
/// resizing a to max(a.len(), b.len()) + 1, to fit a possible carry.
pub(super) fn add2(a: &mut [BigDigit], b: &[BigDigit]) {
    // The addition itself must run unconditionally; only the check that no
    // carry escaped is debug-only.
    let overflow = __add2(a, b);
    debug_assert!(overflow == 0);
}
// All value/ref combinations reduce to the val-ref impl below; addition is
// commutative, so the operands may be swapped freely.
forward_all_binop_to_val_ref_commutative!(impl Add for BigUint, add);
forward_val_assign!(impl AddAssign for BigUint, add_assign);
impl Add<&BigUint> for BigUint {
    type Output = BigUint;

    fn add(mut self, other: &BigUint) -> BigUint {
        // Reuse `self`'s buffer via the in-place implementation.
        self += other;
        self
    }
}
impl AddAssign<&BigUint> for BigUint {
    #[inline]
    fn add_assign(&mut self, other: &BigUint) {
        let self_len = self.data.len();
        let carry = if self_len < other.data.len() {
            // `self` is shorter: add the overlapping low digits first, then
            // append `other`'s high digits and fold the low carry into them.
            let lo_carry = __add2(&mut self.data[..], &other.data[..self_len]);
            self.data.extend_from_slice(&other.data[self_len..]);
            __add2(&mut self.data[self_len..], &[lo_carry])
        } else {
            __add2(&mut self.data[..], &other.data[..])
        };
        // A carry out of the top digit grows the number by one digit.
        if carry != 0 {
            self.data.push(carry);
        }
    }
}
// Scalar support: promotion widens small unsigned primitives; forwarding
// reduces every value/ref combination to the by-value impls below.
promote_unsigned_scalars!(impl Add for BigUint, add);
promote_unsigned_scalars_assign!(impl AddAssign for BigUint, add_assign);
forward_all_scalar_binop_to_val_val_commutative!(impl Add<u32> for BigUint, add);
forward_all_scalar_binop_to_val_val_commutative!(impl Add<u64> for BigUint, add);
forward_all_scalar_binop_to_val_val_commutative!(impl Add<u128> for BigUint, add);
impl Add<u32> for BigUint {
    type Output = BigUint;

    #[inline]
    fn add(mut self, other: u32) -> BigUint {
        self += other;
        self
    }
}
impl AddAssign<u32> for BigUint {
    #[inline]
    fn add_assign(&mut self, other: u32) {
        if other != 0 {
            // Zero (empty digit vec) needs one digit before `__add2` can
            // add into it.
            if self.data.is_empty() {
                self.data.push(0);
            }

            let carry = __add2(&mut self.data, &[other as BigDigit]);
            if carry != 0 {
                self.data.push(carry);
            }
        }
    }
}
impl Add<u64> for BigUint {
    type Output = BigUint;

    #[inline]
    fn add(mut self, other: u64) -> BigUint {
        self += other;
        self
    }
}
impl AddAssign<u64> for BigUint {
    // `cfg_digit!` picks the first body for 32-bit digits (a u64 spans two
    // digits) and the second for 64-bit digits (a u64 is one digit).
    cfg_digit!(
        #[inline]
        fn add_assign(&mut self, other: u64) {
            let (hi, lo) = big_digit::from_doublebigdigit(other);
            if hi == 0 {
                *self += lo;
            } else {
                // Ensure at least two digits exist before adding `[lo, hi]`.
                while self.data.len() < 2 {
                    self.data.push(0);
                }

                let carry = __add2(&mut self.data, &[lo, hi]);
                if carry != 0 {
                    self.data.push(carry);
                }
            }
        }

        #[inline]
        fn add_assign(&mut self, other: u64) {
            if other != 0 {
                if self.data.is_empty() {
                    self.data.push(0);
                }

                let carry = __add2(&mut self.data, &[other as BigDigit]);
                if carry != 0 {
                    self.data.push(carry);
                }
            }
        }
    );
}
impl Add<u128> for BigUint {
    type Output = BigUint;

    #[inline]
    fn add(mut self, other: u128) -> BigUint {
        self += other;
        self
    }
}
impl AddAssign<u128> for BigUint {
    // `cfg_digit!`: first body for 32-bit digits (u128 spans up to four
    // digits), second for 64-bit digits (u128 spans up to two digits).
    cfg_digit!(
        #[inline]
        fn add_assign(&mut self, other: u128) {
            if other <= u128::from(u64::MAX) {
                // Small enough for the cheaper u64 path.
                *self += other as u64
            } else {
                let (a, b, c, d) = super::u32_from_u128(other);
                let carry = if a > 0 {
                    while self.data.len() < 4 {
                        self.data.push(0);
                    }
                    __add2(&mut self.data, &[d, c, b, a])
                } else {
                    // `other > u64::MAX` with a == 0 implies the third digit
                    // is set.
                    debug_assert!(b > 0);
                    while self.data.len() < 3 {
                        self.data.push(0);
                    }
                    __add2(&mut self.data, &[d, c, b])
                };

                if carry != 0 {
                    self.data.push(carry);
                }
            }
        }

        #[inline]
        fn add_assign(&mut self, other: u128) {
            let (hi, lo) = big_digit::from_doublebigdigit(other);
            if hi == 0 {
                *self += lo;
            } else {
                while self.data.len() < 2 {
                    self.data.push(0);
                }

                let carry = __add2(&mut self.data, &[lo, hi]);
                if carry != 0 {
                    self.data.push(carry);
                }
            }
        }
    );
}
impl CheckedAdd for BigUint {
#[inline]
fn checked_add(&self, v: &BigUint) -> Option<BigUint> {
Some(self.add(v))
}
}
impl_sum_iter_type!(BigUint);

View File

@@ -0,0 +1,38 @@
#![cfg(any(feature = "quickcheck", feature = "arbitrary"))]
use super::{biguint_from_vec, BigUint};
use crate::big_digit::BigDigit;
#[cfg(feature = "quickcheck")]
use alloc::boxed::Box;
use alloc::vec::Vec;
#[cfg(feature = "quickcheck")]
#[cfg_attr(docsrs, doc(cfg(feature = "quickcheck")))]
impl quickcheck::Arbitrary for BigUint {
    fn arbitrary(g: &mut quickcheck::Gen) -> Self {
        // Use arbitrary from Vec; `biguint_from_vec` normalizes any
        // trailing zero digits.
        biguint_from_vec(Vec::<BigDigit>::arbitrary(g))
    }

    fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
        // Use shrinker from Vec
        Box::new(self.data.shrink().map(biguint_from_vec))
    }
}
#[cfg(feature = "arbitrary")]
#[cfg_attr(docsrs, doc(cfg(feature = "arbitrary")))]
impl arbitrary::Arbitrary<'_> for BigUint {
    fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result<Self> {
        // Build from an arbitrary digit vector; normalization happens in
        // `biguint_from_vec`.
        Ok(biguint_from_vec(Vec::<BigDigit>::arbitrary(u)?))
    }

    fn arbitrary_take_rest(u: arbitrary::Unstructured<'_>) -> arbitrary::Result<Self> {
        Ok(biguint_from_vec(Vec::<BigDigit>::arbitrary_take_rest(u)?))
    }

    fn size_hint(depth: usize) -> (usize, Option<usize>) {
        // Same cost profile as the underlying digit vector.
        Vec::<BigDigit>::size_hint(depth)
    }
}

93
vendor/num-bigint/src/biguint/bits.rs vendored Normal file
View File

@@ -0,0 +1,93 @@
use super::{BigUint, IntDigits};
use core::ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign};
forward_val_val_binop!(impl BitAnd for BigUint, bitand);
forward_ref_val_binop!(impl BitAnd for BigUint, bitand);
// do not use forward_ref_ref_binop_commutative! for bitand so that we can
// clone the smaller value rather than the larger, avoiding over-allocation
impl BitAnd<&BigUint> for &BigUint {
type Output = BigUint;
#[inline]
fn bitand(self, other: &BigUint) -> BigUint {
// forward to val-ref, choosing the smaller to clone
if self.data.len() <= other.data.len() {
self.clone() & other
} else {
other.clone() & self
}
}
}
forward_val_assign!(impl BitAndAssign for BigUint, bitand_assign);
impl BitAnd<&BigUint> for BigUint {
type Output = BigUint;
#[inline]
fn bitand(mut self, other: &BigUint) -> BigUint {
self &= other;
self
}
}
impl BitAndAssign<&BigUint> for BigUint {
#[inline]
fn bitand_assign(&mut self, other: &BigUint) {
for (ai, &bi) in self.data.iter_mut().zip(other.data.iter()) {
*ai &= bi;
}
self.data.truncate(other.data.len());
self.normalize();
}
}
forward_all_binop_to_val_ref_commutative!(impl BitOr for BigUint, bitor);
forward_val_assign!(impl BitOrAssign for BigUint, bitor_assign);
impl BitOr<&BigUint> for BigUint {
type Output = BigUint;
fn bitor(mut self, other: &BigUint) -> BigUint {
self |= other;
self
}
}
impl BitOrAssign<&BigUint> for BigUint {
#[inline]
fn bitor_assign(&mut self, other: &BigUint) {
for (ai, &bi) in self.data.iter_mut().zip(other.data.iter()) {
*ai |= bi;
}
if other.data.len() > self.data.len() {
let extra = &other.data[self.data.len()..];
self.data.extend(extra.iter().cloned());
}
}
}
forward_all_binop_to_val_ref_commutative!(impl BitXor for BigUint, bitxor);
forward_val_assign!(impl BitXorAssign for BigUint, bitxor_assign);
impl BitXor<&BigUint> for BigUint {
type Output = BigUint;
fn bitxor(mut self, other: &BigUint) -> BigUint {
self ^= other;
self
}
}
impl BitXorAssign<&BigUint> for BigUint {
#[inline]
fn bitxor_assign(&mut self, other: &BigUint) {
for (ai, &bi) in self.data.iter_mut().zip(other.data.iter()) {
*ai ^= bi;
}
if other.data.len() > self.data.len() {
let extra = &other.data[self.data.len()..];
self.data.extend(extra.iter().cloned());
}
self.normalize();
}
}

866
vendor/num-bigint/src/biguint/convert.rs vendored Normal file
View File

@@ -0,0 +1,866 @@
// This uses stdlib features higher than the MSRV
#![allow(clippy::manual_range_contains)] // 1.35
use super::{biguint_from_vec, BigUint, ToBigUint};
use super::addition::add2;
use super::division::{div_rem_digit, FAST_DIV_WIDE};
use super::multiplication::mac_with_carry;
use crate::big_digit::{self, BigDigit};
use crate::ParseBigIntError;
use crate::TryFromBigIntError;
use alloc::vec::Vec;
use core::cmp::Ordering::{Equal, Greater, Less};
use core::convert::TryFrom;
use core::mem;
use core::str::FromStr;
use num_integer::{Integer, Roots};
use num_traits::float::FloatCore;
use num_traits::{FromPrimitive, Num, One, PrimInt, ToPrimitive, Zero};
/// Find last set bit
/// fls(0) == 0, fls(u32::MAX) == 32
fn fls<T: PrimInt>(v: T) -> u8 {
    mem::size_of::<T>() as u8 * 8 - v.leading_zeros() as u8
}

// Floor of log2. Callers must pass a non-zero value: `fls(0) - 1` would
// underflow the u8 (panicking in debug builds).
fn ilog2<T: PrimInt>(v: T) -> u8 {
    fls(v) - 1
}
impl FromStr for BigUint {
    type Err = ParseBigIntError;

    // Parsing with `str::parse`/`from_str` always uses base 10.
    #[inline]
    fn from_str(s: &str) -> Result<BigUint, ParseBigIntError> {
        BigUint::from_str_radix(s, 10)
    }
}
// Convert from a power of two radix (bits == ilog2(radix)) where bits evenly divides
// BigDigit::BITS. `v` holds the digits in little-endian order.
pub(super) fn from_bitwise_digits_le(v: &[u8], bits: u8) -> BigUint {
    debug_assert!(!v.is_empty() && bits <= 8 && big_digit::BITS % bits == 0);
    debug_assert!(v.iter().all(|&c| BigDigit::from(c) < (1 << bits)));
    // Each BigDigit packs an exact whole number of radix digits.
    let digits_per_big_digit = big_digit::BITS / bits;
    let data = v
        .chunks(digits_per_big_digit.into())
        .map(|chunk| {
            // Digits are little-endian, so fold from the most significant end
            // of each chunk down to build the BigDigit.
            chunk
                .iter()
                .rev()
                .fold(0, |acc, &c| (acc << bits) | BigDigit::from(c))
        })
        .collect();
    biguint_from_vec(data)
}
// Convert from a power of two radix (bits == ilog2(radix)) where bits doesn't evenly divide
// BigDigit::BITS. `v` holds the digits in little-endian order.
fn from_inexact_bitwise_digits_le(v: &[u8], bits: u8) -> BigUint {
    debug_assert!(!v.is_empty() && bits <= 8 && big_digit::BITS % bits != 0);
    debug_assert!(v.iter().all(|&c| BigDigit::from(c) < (1 << bits)));
    // Pre-size the output: ceil(total input bits / bits per BigDigit).
    let total_bits = (v.len() as u64).saturating_mul(bits.into());
    let big_digits = Integer::div_ceil(&total_bits, &big_digit::BITS.into())
        .to_usize()
        .unwrap_or(usize::MAX);
    let mut data = Vec::with_capacity(big_digits);
    let mut d = 0;
    let mut dbits = 0; // number of bits we currently have in d
    // walk v accumulating bits in d; whenever we accumulate big_digit::BITS in d, spit out a
    // big_digit:
    for &c in v {
        d |= BigDigit::from(c) << dbits;
        dbits += bits;
        if dbits >= big_digit::BITS {
            data.push(d);
            dbits -= big_digit::BITS;
            // if dbits was > big_digit::BITS, we dropped some of the bits in c (they couldn't fit
            // in d) - grab the bits we lost here:
            d = BigDigit::from(c) >> (bits - dbits);
        }
    }
    // Flush any leftover partial BigDigit.
    if dbits > 0 {
        debug_assert!(dbits < big_digit::BITS);
        data.push(d as BigDigit);
    }
    biguint_from_vec(data)
}
// Read big-endian radix digits (most significant first) for a
// non-power-of-two radix.  (The previous comment said "little-endian",
// but `v[0]` is treated as the most significant digit below.)
fn from_radix_digits_be(v: &[u8], radix: u32) -> BigUint {
    debug_assert!(!v.is_empty() && !radix.is_power_of_two());
    debug_assert!(v.iter().all(|&c| u32::from(c) < radix));
    // Estimate how big the result will be, so we can pre-allocate it.
    #[cfg(feature = "std")]
    let big_digits = {
        let radix_log2 = f64::from(radix).log2();
        let bits = radix_log2 * v.len() as f64;
        (bits / big_digit::BITS as f64).ceil()
    };
    #[cfg(not(feature = "std"))]
    let big_digits = {
        // no_std has no f64::log2; over-estimate via the next power of two.
        let radix_log2 = ilog2(radix.next_power_of_two()) as usize;
        let bits = radix_log2 * v.len();
        (bits / big_digit::BITS as usize) + 1
    };
    let mut data = Vec::with_capacity(big_digits.to_usize().unwrap_or(0));
    // base == radix^power, the largest radix power fitting in one BigDigit.
    let (base, power) = get_radix_base(radix);
    let radix = radix as BigDigit;
    // Split off a first (possibly short) chunk so the rest divides evenly.
    let r = v.len() % power;
    let i = if r == 0 { power } else { r };
    let (head, tail) = v.split_at(i);
    let first = head
        .iter()
        .fold(0, |acc, &d| acc * radix + BigDigit::from(d));
    data.push(first);
    debug_assert!(tail.len() % power == 0);
    for chunk in tail.chunks(power) {
        if data.last() != Some(&0) {
            data.push(0);
        }
        // data *= base
        let mut carry = 0;
        for d in data.iter_mut() {
            *d = mac_with_carry(0, *d, base, &mut carry);
        }
        debug_assert!(carry == 0);
        // data += value of this chunk (fits in one BigDigit by construction)
        let n = chunk
            .iter()
            .fold(0, |acc, &d| acc * radix + BigDigit::from(d));
        add2(&mut data, &[n]);
    }
    biguint_from_vec(data)
}
/// Parse big-endian `radix` digits (`buf[0]` most significant) into a
/// `BigUint`. Returns `None` if any byte is not a valid digit for the radix.
///
/// # Panics
/// Panics unless `2 <= radix <= 256`.
pub(super) fn from_radix_be(buf: &[u8], radix: u32) -> Option<BigUint> {
    assert!(
        2 <= radix && radix <= 256,
        "The radix must be within 2...256"
    );
    if buf.is_empty() {
        return Some(BigUint::ZERO);
    }
    // Radix 256 uses the full byte range, so every input byte is valid.
    if radix != 256 && buf.iter().any(|&b| b >= radix as u8) {
        return None;
    }
    let res = if radix.is_power_of_two() {
        // Powers of two can use bitwise masks and shifting instead of multiplication
        let bits = ilog2(radix);
        // The bitwise converters expect little-endian digit order.
        let mut v = Vec::from(buf);
        v.reverse();
        if big_digit::BITS % bits == 0 {
            from_bitwise_digits_le(&v, bits)
        } else {
            from_inexact_bitwise_digits_le(&v, bits)
        }
    } else {
        from_radix_digits_be(buf, radix)
    };
    Some(res)
}
/// Parse little-endian `radix` digits (`buf[0]` least significant) into a
/// `BigUint`. Returns `None` if any byte is not a valid digit for the radix.
///
/// # Panics
/// Panics unless `2 <= radix <= 256`.
pub(super) fn from_radix_le(buf: &[u8], radix: u32) -> Option<BigUint> {
    assert!(
        2 <= radix && radix <= 256,
        "The radix must be within 2...256"
    );
    if buf.is_empty() {
        return Some(BigUint::ZERO);
    }
    // Radix 256 uses the full byte range, so every input byte is valid.
    if radix != 256 && buf.iter().any(|&b| b >= radix as u8) {
        return None;
    }
    let res = if radix.is_power_of_two() {
        // Powers of two can use bitwise masks and shifting instead of multiplication
        let bits = ilog2(radix);
        if big_digit::BITS % bits == 0 {
            from_bitwise_digits_le(buf, bits)
        } else {
            from_inexact_bitwise_digits_le(buf, bits)
        }
    } else {
        // The general converter wants big-endian order, so reverse a copy.
        let mut v = Vec::from(buf);
        v.reverse();
        from_radix_digits_be(&v, radix)
    };
    Some(res)
}
impl Num for BigUint {
    type FromStrRadixErr = ParseBigIntError;
    /// Creates and initializes a `BigUint`.
    ///
    /// Accepts an optional single leading `+` and embedded `_` digit
    /// separators (but not a leading `_`); any other invalid character
    /// produces an error.
    ///
    /// # Panics
    /// Panics unless `2 <= radix <= 36`.
    fn from_str_radix(s: &str, radix: u32) -> Result<BigUint, ParseBigIntError> {
        assert!(2 <= radix && radix <= 36, "The radix must be within 2...36");
        let mut s = s;
        // Strip a single leading '+' (but reject "++...").
        if let Some(tail) = s.strip_prefix('+') {
            if !tail.starts_with('+') {
                s = tail
            }
        }
        if s.is_empty() {
            return Err(ParseBigIntError::empty());
        }
        if s.starts_with('_') {
            // Must lead with a real digit!
            return Err(ParseBigIntError::invalid());
        }
        // First normalize all characters to plain digit values
        let mut v = Vec::with_capacity(s.len());
        for b in s.bytes() {
            let d = match b {
                b'0'..=b'9' => b - b'0',
                b'a'..=b'z' => b - b'a' + 10,
                b'A'..=b'Z' => b - b'A' + 10,
                b'_' => continue,
                // Sentinel: always >= radix (radix <= 36), so rejected below.
                _ => u8::MAX,
            };
            if d < radix as u8 {
                v.push(d);
            } else {
                return Err(ParseBigIntError::invalid());
            }
        }
        let res = if radix.is_power_of_two() {
            // Powers of two can use bitwise masks and shifting instead of multiplication
            let bits = ilog2(radix);
            // The bitwise converters expect little-endian digit order.
            v.reverse();
            if big_digit::BITS % bits == 0 {
                from_bitwise_digits_le(&v, bits)
            } else {
                from_inexact_bitwise_digits_le(&v, bits)
            }
        } else {
            from_radix_digits_be(&v, radix)
        };
        Ok(res)
    }
}
/// Extract the highest 64 bits of `v` into a `u64`, applying round-to-odd:
/// if any discarded lower bit was set, the least significant returned bit is
/// forced to 1 (see the inline comment below). Used to build float mantissas.
fn high_bits_to_u64(v: &BigUint) -> u64 {
    match v.data.len() {
        0 => 0,
        1 => {
            // XXX Conversion is useless if already 64-bit.
            #[allow(clippy::useless_conversion)]
            let v0 = u64::from(v.data[0]);
            v0
        }
        _ => {
            // Walk from the most significant digit down, shifting digits into
            // `ret` until 64 bits have been gathered.
            let mut bits = v.bits();
            let mut ret = 0u64;
            let mut ret_bits = 0;
            for d in v.data.iter().rev() {
                // Number of significant bits in this digit (the top digit may
                // be partially filled).
                let digit_bits = (bits - 1) % u64::from(big_digit::BITS) + 1;
                let bits_want = Ord::min(64 - ret_bits, digit_bits);
                if bits_want != 0 {
                    if bits_want != 64 {
                        ret <<= bits_want;
                    }
                    // XXX Conversion is useless if already 64-bit.
                    #[allow(clippy::useless_conversion)]
                    let d0 = u64::from(*d) >> (digit_bits - bits_want);
                    ret |= d0;
                }
                // Implement round-to-odd: If any lower bits are 1, set LSB to 1
                // so that rounding again to floating point value using
                // nearest-ties-to-even is correct.
                //
                // See: https://en.wikipedia.org/wiki/Rounding#Rounding_to_prepare_for_shorter_precision
                if digit_bits - bits_want != 0 {
                    // XXX Conversion is useless if already 64-bit.
                    #[allow(clippy::useless_conversion)]
                    let masked = u64::from(*d) << (64 - (digit_bits - bits_want) as u32);
                    ret |= (masked != 0) as u64;
                }
                ret_bits += bits_want;
                bits -= bits_want;
            }
            ret
        }
    }
}
impl ToPrimitive for BigUint {
    #[inline]
    fn to_i64(&self) -> Option<i64> {
        // Convert via u64, then reject values above i64::MAX.
        self.to_u64().as_ref().and_then(u64::to_i64)
    }
    #[inline]
    fn to_i128(&self) -> Option<i128> {
        // Convert via u128, then reject values above i128::MAX.
        self.to_u128().as_ref().and_then(u128::to_i128)
    }
    #[allow(clippy::useless_conversion)]
    #[inline]
    fn to_u64(&self) -> Option<u64> {
        // Accumulate digits little-endian; the shifted digits never overlap,
        // so `+=` here acts like `|=`.
        let mut ret: u64 = 0;
        let mut bits = 0;
        for i in self.data.iter() {
            if bits >= 64 {
                // More digits than fit in 64 bits (data is normalized).
                return None;
            }
            // XXX Conversion is useless if already 64-bit.
            ret += u64::from(*i) << bits;
            bits += big_digit::BITS;
        }
        Some(ret)
    }
    #[inline]
    fn to_u128(&self) -> Option<u128> {
        let mut ret: u128 = 0;
        let mut bits = 0;
        for i in self.data.iter() {
            if bits >= 128 {
                return None;
            }
            ret |= u128::from(*i) << bits;
            bits += big_digit::BITS;
        }
        Some(ret)
    }
    #[inline]
    fn to_f32(&self) -> Option<f32> {
        // Top 64 bits (round-to-odd) as mantissa, scaled by the remaining
        // bit count; saturates to infinity instead of returning None.
        let mantissa = high_bits_to_u64(self);
        let exponent = self.bits() - u64::from(fls(mantissa));
        if exponent > f32::MAX_EXP as u64 {
            Some(f32::INFINITY)
        } else {
            Some((mantissa as f32) * 2.0f32.powi(exponent as i32))
        }
    }
    #[inline]
    fn to_f64(&self) -> Option<f64> {
        let mantissa = high_bits_to_u64(self);
        let exponent = self.bits() - u64::from(fls(mantissa));
        if exponent > f64::MAX_EXP as u64 {
            Some(f64::INFINITY)
        } else {
            Some((mantissa as f64) * 2.0f64.powi(exponent as i32))
        }
    }
}
/// Implements `TryFrom<&BigUint>` and `TryFrom<BigUint>` for a primitive
/// type `$T`, delegating to the given `ToPrimitive` conversion `$to_ty`.
/// The by-value form returns the original `BigUint` back inside the error.
macro_rules! impl_try_from_biguint {
    ($T:ty, $to_ty:path) => {
        impl TryFrom<&BigUint> for $T {
            type Error = TryFromBigIntError<()>;
            #[inline]
            fn try_from(value: &BigUint) -> Result<$T, TryFromBigIntError<()>> {
                $to_ty(value).ok_or(TryFromBigIntError::new(()))
            }
        }
        impl TryFrom<BigUint> for $T {
            type Error = TryFromBigIntError<BigUint>;
            #[inline]
            fn try_from(value: BigUint) -> Result<$T, TryFromBigIntError<BigUint>> {
                <$T>::try_from(&value).map_err(|_| TryFromBigIntError::new(value))
            }
        }
    };
}
impl_try_from_biguint!(u8, ToPrimitive::to_u8);
impl_try_from_biguint!(u16, ToPrimitive::to_u16);
impl_try_from_biguint!(u32, ToPrimitive::to_u32);
impl_try_from_biguint!(u64, ToPrimitive::to_u64);
impl_try_from_biguint!(usize, ToPrimitive::to_usize);
impl_try_from_biguint!(u128, ToPrimitive::to_u128);
impl_try_from_biguint!(i8, ToPrimitive::to_i8);
impl_try_from_biguint!(i16, ToPrimitive::to_i16);
impl_try_from_biguint!(i32, ToPrimitive::to_i32);
impl_try_from_biguint!(i64, ToPrimitive::to_i64);
impl_try_from_biguint!(isize, ToPrimitive::to_isize);
impl_try_from_biguint!(i128, ToPrimitive::to_i128);
impl FromPrimitive for BigUint {
    #[inline]
    fn from_i64(n: i64) -> Option<BigUint> {
        // Negative values have no unsigned representation.
        if n >= 0 {
            Some(BigUint::from(n as u64))
        } else {
            None
        }
    }
    #[inline]
    fn from_i128(n: i128) -> Option<BigUint> {
        if n >= 0 {
            Some(BigUint::from(n as u128))
        } else {
            None
        }
    }
    #[inline]
    fn from_u64(n: u64) -> Option<BigUint> {
        Some(BigUint::from(n))
    }
    #[inline]
    fn from_u128(n: u128) -> Option<BigUint> {
        Some(BigUint::from(n))
    }
    #[inline]
    fn from_f64(mut n: f64) -> Option<BigUint> {
        // handle NAN, INFINITY, NEG_INFINITY
        if !n.is_finite() {
            return None;
        }
        // match the rounding of casting from float to int
        n = n.trunc();
        // handle 0.x, -0.x
        if n.is_zero() {
            return Some(Self::ZERO);
        }
        let (mantissa, exponent, sign) = FloatCore::integer_decode(n);
        if sign == -1 {
            return None;
        }
        // Reconstruct the value as mantissa * 2^exponent.
        let mut ret = BigUint::from(mantissa);
        match exponent.cmp(&0) {
            Greater => ret <<= exponent as usize,
            Equal => {}
            Less => ret >>= (-exponent) as usize,
        }
        Some(ret)
    }
}
impl From<u64> for BigUint {
    /// Builds the digit vector little-endian, one `BigDigit` at a time.
    #[inline]
    fn from(mut n: u64) -> Self {
        let mut ret: BigUint = Self::ZERO;
        while n != 0 {
            ret.data.push(n as BigDigit);
            // don't overflow if BITS is 64:
            // (a single `n >> 64` would be an invalid full-width shift)
            n = (n >> 1) >> (big_digit::BITS - 1);
        }
        ret
    }
}
impl From<u128> for BigUint {
    /// Builds the digit vector little-endian, one `BigDigit` at a time.
    #[inline]
    fn from(n: u128) -> Self {
        let mut remaining = n;
        let mut result: BigUint = Self::ZERO;
        while remaining != 0 {
            result.data.push(remaining as BigDigit);
            // u128 is wider than any BigDigit, so this shift never reaches
            // the full 128-bit width.
            remaining >>= big_digit::BITS;
        }
        result
    }
}
/// Implements infallible `From<$T>` for small unsigned primitives by
/// widening through `u64`.
macro_rules! impl_biguint_from_uint {
    ($T:ty) => {
        impl From<$T> for BigUint {
            #[inline]
            fn from(n: $T) -> Self {
                BigUint::from(n as u64)
            }
        }
    };
}
impl_biguint_from_uint!(u8);
impl_biguint_from_uint!(u16);
impl_biguint_from_uint!(u32);
impl_biguint_from_uint!(usize);
/// Implements fallible `TryFrom<$T>` for signed primitives via the
/// corresponding `FromPrimitive` conversion (fails on negative values).
macro_rules! impl_biguint_try_from_int {
    ($T:ty, $from_ty:path) => {
        impl TryFrom<$T> for BigUint {
            type Error = TryFromBigIntError<()>;
            #[inline]
            fn try_from(value: $T) -> Result<BigUint, TryFromBigIntError<()>> {
                $from_ty(value).ok_or(TryFromBigIntError::new(()))
            }
        }
    };
}
impl_biguint_try_from_int!(i8, FromPrimitive::from_i8);
impl_biguint_try_from_int!(i16, FromPrimitive::from_i16);
impl_biguint_try_from_int!(i32, FromPrimitive::from_i32);
impl_biguint_try_from_int!(i64, FromPrimitive::from_i64);
impl_biguint_try_from_int!(isize, FromPrimitive::from_isize);
impl_biguint_try_from_int!(i128, FromPrimitive::from_i128);
impl ToBigUint for BigUint {
    /// Identity conversion: returns a clone of `self`, always `Some`.
    #[inline]
    fn to_biguint(&self) -> Option<BigUint> {
        Some(self.clone())
    }
}
/// Implements `ToBigUint` for a primitive type `$T` via the given
/// `FromPrimitive` constructor (which returns `None` for values with no
/// unsigned-integer representation, e.g. negatives or non-finite floats).
macro_rules! impl_to_biguint {
    ($T:ty, $from_ty:path) => {
        impl ToBigUint for $T {
            #[inline]
            fn to_biguint(&self) -> Option<BigUint> {
                $from_ty(*self)
            }
        }
    };
}
impl_to_biguint!(isize, FromPrimitive::from_isize);
impl_to_biguint!(i8, FromPrimitive::from_i8);
impl_to_biguint!(i16, FromPrimitive::from_i16);
impl_to_biguint!(i32, FromPrimitive::from_i32);
impl_to_biguint!(i64, FromPrimitive::from_i64);
impl_to_biguint!(i128, FromPrimitive::from_i128);
impl_to_biguint!(usize, FromPrimitive::from_usize);
impl_to_biguint!(u8, FromPrimitive::from_u8);
impl_to_biguint!(u16, FromPrimitive::from_u16);
impl_to_biguint!(u32, FromPrimitive::from_u32);
impl_to_biguint!(u64, FromPrimitive::from_u64);
impl_to_biguint!(u128, FromPrimitive::from_u128);
impl_to_biguint!(f32, FromPrimitive::from_f32);
impl_to_biguint!(f64, FromPrimitive::from_f64);
impl From<bool> for BigUint {
    /// `true` maps to one, `false` to zero.
    fn from(x: bool) -> Self {
        match x {
            true => One::one(),
            false => Self::ZERO,
        }
    }
}
// Extract bitwise digits that evenly divide BigDigit.
// Returns the digits in little-endian order with no trailing zeros.
pub(super) fn to_bitwise_digits_le(u: &BigUint, bits: u8) -> Vec<u8> {
    debug_assert!(!u.is_zero() && bits <= 8 && big_digit::BITS % bits == 0);
    let last_i = u.data.len() - 1;
    let mask: BigDigit = (1 << bits) - 1;
    let digits_per_big_digit = big_digit::BITS / bits;
    // Exact output length: ceil(bit length / bits per output digit).
    let digits = Integer::div_ceil(&u.bits(), &u64::from(bits))
        .to_usize()
        .unwrap_or(usize::MAX);
    let mut res = Vec::with_capacity(digits);
    // Every BigDigit except the most significant yields a full run of digits.
    for mut r in u.data[..last_i].iter().cloned() {
        for _ in 0..digits_per_big_digit {
            res.push((r & mask) as u8);
            r >>= bits;
        }
    }
    // The top BigDigit stops once exhausted, avoiding trailing zero digits.
    let mut r = u.data[last_i];
    while r != 0 {
        res.push((r & mask) as u8);
        r >>= bits;
    }
    res
}
// Extract bitwise digits that don't evenly divide BigDigit.
// Returns the digits in little-endian order with trailing zeros removed.
fn to_inexact_bitwise_digits_le(u: &BigUint, bits: u8) -> Vec<u8> {
    debug_assert!(!u.is_zero() && bits <= 8 && big_digit::BITS % bits != 0);
    let mask: BigDigit = (1 << bits) - 1;
    let digits = Integer::div_ceil(&u.bits(), &u64::from(bits))
        .to_usize()
        .unwrap_or(usize::MAX);
    let mut res = Vec::with_capacity(digits);
    // r accumulates unconsumed bits; rbits counts how many it holds.
    let mut r = 0;
    let mut rbits = 0;
    for c in &u.data {
        r |= *c << rbits;
        rbits += big_digit::BITS;
        while rbits >= bits {
            res.push((r & mask) as u8);
            r >>= bits;
            // r had more bits than it could fit - grab the bits we lost
            if rbits > big_digit::BITS {
                r = *c >> (big_digit::BITS - (rbits - bits));
            }
            rbits -= bits;
        }
    }
    // Flush the final partial digit, then trim any trailing zeros.
    if rbits != 0 {
        res.push(r as u8);
    }
    while let Some(&0) = res.last() {
        res.pop();
    }
    res
}
// Extract little-endian radix digits for a non-power-of-two radix by
// repeated division; returns digits least significant first.
#[inline(always)] // forced inline to get const-prop for radix=10
pub(super) fn to_radix_digits_le(u: &BigUint, radix: u32) -> Vec<u8> {
    debug_assert!(!u.is_zero() && !radix.is_power_of_two());
    #[cfg(feature = "std")]
    let radix_digits = {
        let radix_log2 = f64::from(radix).log2();
        ((u.bits() as f64) / radix_log2).ceil()
    };
    #[cfg(not(feature = "std"))]
    let radix_digits = {
        // no_std has no f64::log2; use the bit width as an over-estimate.
        let radix_log2 = ilog2(radix) as usize;
        ((u.bits() as usize) / radix_log2) + 1
    };
    // Estimate how big the result will be, so we can pre-allocate it.
    let mut res = Vec::with_capacity(radix_digits.to_usize().unwrap_or(0));
    let mut digits = u.clone();
    // X86 DIV can quickly divide by a full digit, otherwise we choose a divisor
    // that's suitable for `div_half` to avoid slow `DoubleBigDigit` division.
    let (base, power) = if FAST_DIV_WIDE {
        get_radix_base(radix)
    } else {
        get_half_radix_base(radix)
    };
    let radix = radix as BigDigit;
    // For very large numbers, the O(n²) loop of repeated `div_rem_digit` dominates the
    // performance. We can mitigate this by dividing into chunks of a larger base first.
    // The threshold for this was chosen by anecdotal performance measurements to
    // approximate where this starts to make a noticeable difference.
    if digits.data.len() >= 64 {
        let mut big_base = BigUint::from(base);
        let mut big_power = 1usize;
        // Choose a target base length near √n.
        let target_len = digits.data.len().sqrt();
        while big_base.data.len() < target_len {
            big_base = &big_base * &big_base;
            big_power *= 2;
        }
        // This outer loop will run approximately √n times.
        while digits > big_base {
            // This is still the dominating factor, with n digits divided by √n digits.
            let (q, mut big_r) = digits.div_rem(&big_base);
            digits = q;
            // This inner loop now has O(√n²)=O(n) behavior altogether.
            for _ in 0..big_power {
                let (q, mut r) = div_rem_digit(big_r, base);
                big_r = q;
                for _ in 0..power {
                    res.push((r % radix) as u8);
                    r /= radix;
                }
            }
        }
    }
    // Standard digit extraction: peel off `power` radix digits per division.
    while digits.data.len() > 1 {
        let (q, mut r) = div_rem_digit(digits, base);
        for _ in 0..power {
            res.push((r % radix) as u8);
            r /= radix;
        }
        digits = q;
    }
    // Final single-digit value: emit digits until exhausted (no zero padding).
    let mut r = digits.data[0];
    while r != 0 {
        res.push((r % radix) as u8);
        r /= radix;
    }
    res
}
/// Convert `u` to little-endian digits in the given radix.
/// Zero yields a single `0` digit.
pub(super) fn to_radix_le(u: &BigUint, radix: u32) -> Vec<u8> {
    if u.is_zero() {
        return vec![0];
    }
    if radix.is_power_of_two() {
        // Powers of two can use bitwise masks and shifting instead of division
        let bits = ilog2(radix);
        return if big_digit::BITS % bits == 0 {
            to_bitwise_digits_le(u, bits)
        } else {
            to_inexact_bitwise_digits_le(u, bits)
        };
    }
    if radix == 10 {
        // 10 is so common that it's worth separating out for const-propagation.
        // Optimizers can often turn constant division into a faster multiplication.
        to_radix_digits_le(u, 10)
    } else {
        to_radix_digits_le(u, radix)
    }
}
/// Render `u` as ASCII digits in the given radix, least significant first
/// (the caller reverses). Digits >= 10 use lowercase letters.
///
/// # Panics
/// Panics unless `2 <= radix <= 36`.
pub(crate) fn to_str_radix_reversed(u: &BigUint, radix: u32) -> Vec<u8> {
    assert!(2 <= radix && radix <= 36, "The radix must be within 2...36");
    if u.is_zero() {
        return vec![b'0'];
    }
    let mut digits = to_radix_le(u, radix);
    // Map each raw digit value to its ASCII character in place.
    for d in digits.iter_mut() {
        debug_assert!(u32::from(*d) < radix);
        *d = match *d {
            0..=9 => b'0' + *d,
            _ => b'a' + (*d - 10),
        };
    }
    digits
}
/// Returns the greatest power of the radix for the `BigDigit` bit size,
/// as `(radix^power, power)` from a table indexed by radix (3..=255).
#[inline]
fn get_radix_base(radix: u32) -> (BigDigit, usize) {
    static BASES: [(BigDigit, usize); 257] = generate_radix_bases(big_digit::MAX);
    debug_assert!(!radix.is_power_of_two());
    debug_assert!((3..256).contains(&radix));
    BASES[radix as usize]
}
/// Returns the greatest power of the radix for half the `BigDigit` bit size,
/// suitable for the `div_half` path on targets without fast wide division.
#[inline]
fn get_half_radix_base(radix: u32) -> (BigDigit, usize) {
    static BASES: [(BigDigit, usize); 257] = generate_radix_bases(big_digit::HALF);
    debug_assert!(!radix.is_power_of_two());
    debug_assert!((3..256).contains(&radix));
    BASES[radix as usize]
}
/// Generate tables of the greatest power of each radix that is less that the given maximum. These
/// are returned from `get_radix_base` to batch the multiplication/division of radix conversions on
/// full `BigUint` values, operating on primitive integers as much as possible.
///
/// e.g. BASES_16[3] = (59049, 10) // 3¹⁰ fits in u16, but 3¹¹ is too big
///      BASES_32[3] = (3486784401, 20)
///      BASES_64[3] = (12157665459056928801, 40)
///
/// Powers of two are not included, just zeroed, as they're implemented with shifts.
const fn generate_radix_bases(max: BigDigit) -> [(BigDigit, usize); 257] {
    let mut bases = [(0, 0); 257];
    // const fn: manual while-loops instead of iterators.
    let mut radix: BigDigit = 3;
    while radix < 256 {
        if !radix.is_power_of_two() {
            // Find the largest radix^power that still fits in `max`.
            let mut power = 1;
            let mut base = radix;
            while let Some(b) = base.checked_mul(radix) {
                if b > max {
                    break;
                }
                base = b;
                power += 1;
            }
            bases[radix as usize] = (base, power)
        }
        radix += 1;
    }
    bases
}
// Verify the full-width table: each entry is the largest radix power that
// fits in a BigDigit (the next power must overflow).
#[test]
fn test_radix_bases() {
    for radix in 3u32..256 {
        if !radix.is_power_of_two() {
            let (base, power) = get_radix_base(radix);
            let radix = BigDigit::from(radix);
            let power = u32::try_from(power).unwrap();
            assert_eq!(base, radix.pow(power));
            assert!(radix.checked_pow(power + 1).is_none());
        }
    }
}
// Verify the half-width table: each entry is the largest radix power not
// exceeding big_digit::HALF (the next power must exceed it).
#[test]
fn test_half_radix_bases() {
    for radix in 3u32..256 {
        if !radix.is_power_of_two() {
            let (base, power) = get_half_radix_base(radix);
            let radix = BigDigit::from(radix);
            let power = u32::try_from(power).unwrap();
            assert_eq!(base, radix.pow(power));
            assert!(radix.pow(power + 1) > big_digit::HALF);
        }
    }
}

View File

@@ -0,0 +1,704 @@
use super::addition::__add2;
use super::{cmp_slice, BigUint};
use crate::big_digit::{self, BigDigit, DoubleBigDigit};
use crate::UsizePromotion;
use core::cmp::Ordering::{Equal, Greater, Less};
use core::mem;
use core::ops::{Div, DivAssign, Rem, RemAssign};
use num_integer::Integer;
use num_traits::{CheckedDiv, CheckedEuclid, Euclid, One, ToPrimitive, Zero};
/// Whether this target has a fast two-digit-by-one-digit hardware divide;
/// x86/x86_64's `div` instruction handles that case directly, so the radix
/// conversion and division code picks full-width divisors there.
pub(super) const FAST_DIV_WIDE: bool = cfg!(any(target_arch = "x86", target_arch = "x86_64"));
/// Divide a two digit numerator by a one digit divisor, returns quotient and remainder:
///
/// Note: the caller must ensure that both the quotient and remainder will fit into a single digit.
/// This is _not_ true for an arbitrary numerator/denominator.
///
/// (This function also matches what the x86 divide instruction does).
///
/// Portable fallback: widens to `DoubleBigDigit` and lets the compiler
/// lower the division.
#[cfg(any(miri, not(any(target_arch = "x86", target_arch = "x86_64"))))]
#[inline]
fn div_wide(hi: BigDigit, lo: BigDigit, divisor: BigDigit) -> (BigDigit, BigDigit) {
    // hi < divisor guarantees the quotient fits in one BigDigit.
    debug_assert!(hi < divisor);
    let lhs = big_digit::to_doublebigdigit(hi, lo);
    let rhs = DoubleBigDigit::from(divisor);
    ((lhs / rhs) as BigDigit, (lhs % rhs) as BigDigit)
}
/// x86 and x86_64 can use a real `div` instruction.
#[cfg(all(not(miri), any(target_arch = "x86", target_arch = "x86_64")))]
#[inline]
fn div_wide(hi: BigDigit, lo: BigDigit, divisor: BigDigit) -> (BigDigit, BigDigit) {
    // This debug assertion covers the potential #DE for divisor==0 or a quotient too large for one
    // register, otherwise in release mode it will become a target-specific fault like SIGFPE.
    // This should never occur with the inputs from our few `div_wide` callers.
    debug_assert!(hi < divisor);
    // SAFETY: The `div` instruction only affects registers, reading the explicit operand as the
    // divisor, and implicitly reading RDX:RAX or EDX:EAX as the dividend. The result is implicitly
    // written back to RAX or EAX for the quotient and RDX or EDX for the remainder. No memory is
    // used, and flags are not preserved.
    unsafe {
        let (div, rem);
        // cfg_digit! presumably selects the first arm for 32-bit digits and
        // the second for 64-bit digits; `{0:e}`/`{0:r}` pick the 32-bit or
        // 64-bit register form of the operand.
        cfg_digit!(
            macro_rules! div {
                () => {
                    "div {0:e}"
                };
            }
            macro_rules! div {
                () => {
                    "div {0:r}"
                };
            }
        );
        core::arch::asm!(
            div!(),
            in(reg) divisor,
            inout("dx") hi => rem,
            inout("ax") lo => div,
            options(pure, nomem, nostack),
        );
        (div, rem)
    }
}
/// For small divisors, we can divide without promoting to `DoubleBigDigit` by
/// using half-size pieces of digit, like long-division.
///
/// Performs two half-width long-division steps: first on the upper half of
/// `digit` (with `rem` as the incoming remainder), then on the lower half.
#[inline]
fn div_half(rem: BigDigit, digit: BigDigit, divisor: BigDigit) -> (BigDigit, BigDigit) {
    use crate::big_digit::{HALF, HALF_BITS};
    debug_assert!(rem < divisor && divisor <= HALF);
    let (hi, rem) = ((rem << HALF_BITS) | (digit >> HALF_BITS)).div_rem(&divisor);
    let (lo, rem) = ((rem << HALF_BITS) | (digit & HALF)).div_rem(&divisor);
    ((hi << HALF_BITS) | lo, rem)
}
/// Divide `a` by a single digit in place, returning the quotient and the
/// final remainder.
///
/// # Panics
/// Panics if `b` is zero.
#[inline]
pub(super) fn div_rem_digit(mut a: BigUint, b: BigDigit) -> (BigUint, BigDigit) {
    if b == 0 {
        panic!("attempt to divide by zero")
    }
    let mut rem = 0;
    // Without fast wide division, small divisors go through the half-width
    // long-division path; otherwise use the wide divide per digit.
    if !FAST_DIV_WIDE && b <= big_digit::HALF {
        for d in a.data.iter_mut().rev() {
            let (q, r) = div_half(rem, *d, b);
            *d = q;
            rem = r;
        }
    } else {
        for d in a.data.iter_mut().rev() {
            let (q, r) = div_wide(rem, *d, b);
            *d = q;
            rem = r;
        }
    }
    // The quotient may have lost its top digit; strip leading zeros.
    (a.normalized(), rem)
}
/// Remainder of `a` divided by a single digit, without computing the
/// quotient or consuming `a`.
///
/// # Panics
/// Panics if `b` is zero.
#[inline]
fn rem_digit(a: &BigUint, b: BigDigit) -> BigDigit {
    if b == 0 {
        panic!("attempt to divide by zero")
    }
    let mut rem = 0;
    // Same path selection as `div_rem_digit`, but the quotient is discarded.
    if !FAST_DIV_WIDE && b <= big_digit::HALF {
        for &digit in a.data.iter().rev() {
            let (_, r) = div_half(rem, digit, b);
            rem = r;
        }
    } else {
        for &digit in a.data.iter().rev() {
            let (_, r) = div_wide(rem, digit, b);
            rem = r;
        }
    }
    rem
}
/// Subtract a multiple.
/// a -= b * c
/// Returns a borrow (if a < b then borrow > 0).
///
/// Requires `a.len() == b.len()`; operates digit-by-digit with the carry
/// kept offset by `big_digit::MAX` so it stays non-negative in a
/// `DoubleBigDigit`.
fn sub_mul_digit_same_len(a: &mut [BigDigit], b: &[BigDigit], c: BigDigit) -> BigDigit {
    debug_assert!(a.len() == b.len());
    // carry is between -big_digit::MAX and 0, so to avoid overflow we store
    // offset_carry = carry + big_digit::MAX
    let mut offset_carry = big_digit::MAX;
    for (x, y) in a.iter_mut().zip(b) {
        // We want to calculate sum = x - y * c + carry.
        // sum >= -(big_digit::MAX * big_digit::MAX) - big_digit::MAX
        // sum <= big_digit::MAX
        // Offsetting sum by (big_digit::MAX << big_digit::BITS) puts it in DoubleBigDigit range.
        let offset_sum = big_digit::to_doublebigdigit(big_digit::MAX, *x)
            - big_digit::MAX as DoubleBigDigit
            + offset_carry as DoubleBigDigit
            - *y as DoubleBigDigit * c as DoubleBigDigit;
        let (new_offset_carry, new_x) = big_digit::from_doublebigdigit(offset_sum);
        offset_carry = new_offset_carry;
        *x = new_x;
    }
    // Return the borrow.
    big_digit::MAX - offset_carry
}
/// Divide `u` by `d`, consuming both; returns `(quotient, remainder)`.
///
/// Special-cases zero, single-digit divisors, and `u <= d` before falling
/// through to the general Knuth Algorithm D core.
///
/// # Panics
/// Panics if `d` is zero.
fn div_rem(mut u: BigUint, mut d: BigUint) -> (BigUint, BigUint) {
    if d.is_zero() {
        panic!("attempt to divide by zero")
    }
    if u.is_zero() {
        return (BigUint::ZERO, BigUint::ZERO);
    }
    if d.data.len() == 1 {
        if d.data == [1] {
            return (u, BigUint::ZERO);
        }
        let (div, rem) = div_rem_digit(u, d.data[0]);
        // reuse d
        d.data.clear();
        d += rem;
        return (div, d);
    }
    // Required or the q_len calculation below can underflow:
    match u.cmp(&d) {
        Less => return (BigUint::ZERO, u),
        Equal => {
            u.set_one();
            return (u, BigUint::ZERO);
        }
        Greater => {} // Do nothing
    }
    // This algorithm is from Knuth, TAOCP vol 2 section 4.3, algorithm D:
    //
    // First, normalize the arguments so the highest bit in the highest digit of the divisor is
    // set: the main loop uses the highest digit of the divisor for generating guesses, so we
    // want it to be the largest number we can efficiently divide by.
    //
    let shift = d.data.last().unwrap().leading_zeros() as usize;
    if shift == 0 {
        // no need to clone d
        div_rem_core(u, &d.data)
    } else {
        let (q, r) = div_rem_core(u << shift, &(d << shift).data);
        // renormalize the remainder
        (q, r >> shift)
    }
}
/// Borrowing counterpart of `div_rem`: divides `*u` by `*d` without
/// consuming either, cloning only where necessary.
///
/// # Panics
/// Panics if `d` is zero.
pub(super) fn div_rem_ref(u: &BigUint, d: &BigUint) -> (BigUint, BigUint) {
    if d.is_zero() {
        panic!("attempt to divide by zero")
    }
    if u.is_zero() {
        return (BigUint::ZERO, BigUint::ZERO);
    }
    if d.data.len() == 1 {
        if d.data == [1] {
            return (u.clone(), BigUint::ZERO);
        }
        let (div, rem) = div_rem_digit(u.clone(), d.data[0]);
        return (div, rem.into());
    }
    // Required or the q_len calculation below can underflow:
    match u.cmp(d) {
        Less => return (BigUint::ZERO, u.clone()),
        Equal => return (One::one(), BigUint::ZERO),
        Greater => {} // Do nothing
    }
    // This algorithm is from Knuth, TAOCP vol 2 section 4.3, algorithm D:
    //
    // First, normalize the arguments so the highest bit in the highest digit of the divisor is
    // set: the main loop uses the highest digit of the divisor for generating guesses, so we
    // want it to be the largest number we can efficiently divide by.
    //
    let shift = d.data.last().unwrap().leading_zeros() as usize;
    if shift == 0 {
        // no need to clone d
        div_rem_core(u.clone(), &d.data)
    } else {
        let (q, r) = div_rem_core(u << shift, &(d << shift).data);
        // renormalize the remainder
        (q, r >> shift)
    }
}
/// An implementation of the base division algorithm.
/// Knuth, TAOCP vol 2 section 4.3.1, algorithm D, with an improvement from exercises 19-21.
///
/// Requires `a.data.len() >= b.len() > 1` and a normalized divisor (the high
/// bit of `b`'s top digit set), as established by the callers above.
fn div_rem_core(mut a: BigUint, b: &[BigDigit]) -> (BigUint, BigUint) {
    debug_assert!(a.data.len() >= b.len() && b.len() > 1);
    debug_assert!(b.last().unwrap().leading_zeros() == 0);
    // The algorithm works by incrementally calculating "guesses", q0, for the next digit of the
    // quotient. Once we have any number q0 such that (q0 << j) * b <= a, we can set
    //
    //     q += q0 << j
    //     a -= (q0 << j) * b
    //
    // and then iterate until a < b. Then, (q, a) will be our desired quotient and remainder.
    //
    // q0, our guess, is calculated by dividing the last three digits of a by the last two digits of
    // b - this will give us a guess that is close to the actual quotient, but is possibly greater.
    // It can only be greater by 1 and only in rare cases, with probability at most
    // 2^-(big_digit::BITS-1) for random a, see TAOCP 4.3.1 exercise 21.
    //
    // If the quotient turns out to be too large, we adjust it by 1:
    //     q -= 1 << j
    //     a += b << j
    // a0 stores an additional extra most significant digit of the dividend, not stored in a.
    let mut a0 = 0;
    // [b1, b0] are the two most significant digits of the divisor. They never change.
    let b0 = b[b.len() - 1];
    let b1 = b[b.len() - 2];
    let q_len = a.data.len() - b.len() + 1;
    let mut q = BigUint {
        data: vec![0; q_len],
    };
    for j in (0..q_len).rev() {
        debug_assert!(a.data.len() == b.len() + j);
        let a1 = *a.data.last().unwrap();
        let a2 = a.data[a.data.len() - 2];
        // The first q0 estimate is [a1,a0] / b0. It will never be too small, it may be too large
        // by at most 2.
        let (mut q0, mut r) = if a0 < b0 {
            let (q0, r) = div_wide(a0, a1, b0);
            (q0, r as DoubleBigDigit)
        } else {
            debug_assert!(a0 == b0);
            // Avoid overflowing q0, we know the quotient fits in BigDigit.
            // [a1,a0] = b0 * (1<<BITS - 1) + (a0 + a1)
            (big_digit::MAX, a0 as DoubleBigDigit + a1 as DoubleBigDigit)
        };
        // r = [a1,a0] - q0 * b0
        //
        // Now we want to compute a more precise estimate [a2,a1,a0] / [b1,b0] which can only be
        // less or equal to the current q0.
        //
        // q0 is too large if:
        // [a2,a1,a0] < q0 * [b1,b0]
        // (r << BITS) + a2 < q0 * b1
        while r <= big_digit::MAX as DoubleBigDigit
            && big_digit::to_doublebigdigit(r as BigDigit, a2)
                < q0 as DoubleBigDigit * b1 as DoubleBigDigit
        {
            q0 -= 1;
            r += b0 as DoubleBigDigit;
        }
        // q0 is now either the correct quotient digit, or in rare cases 1 too large.
        // Subtract (q0 << j) from a. This may overflow, in which case we will have to correct.
        let mut borrow = sub_mul_digit_same_len(&mut a.data[j..], b, q0);
        if borrow > a0 {
            // q0 is too large. We need to add back one multiple of b.
            q0 -= 1;
            borrow -= __add2(&mut a.data[j..], b);
        }
        // The top digit of a, stored in a0, has now been zeroed.
        debug_assert!(borrow == a0);
        q.data[j] = q0;
        // Pop off the next top digit of a.
        a0 = a.data.pop().unwrap();
    }
    // Put the last popped digit back before normalizing the remainder.
    a.data.push(a0);
    a.normalize();
    debug_assert_eq!(cmp_slice(&a.data, b), Less);
    (q.normalized(), a)
}
// Presumably these project macros derive the remaining Div operand
// combinations (val/ref, ref/val, and by-value assign) from the two
// implementations below.
forward_val_ref_binop!(impl Div for BigUint, div);
forward_ref_val_binop!(impl Div for BigUint, div);
forward_val_assign!(impl DivAssign for BigUint, div_assign);
impl Div<BigUint> for BigUint {
    type Output = BigUint;

    /// Floor division; the remainder is discarded.
    #[inline]
    fn div(self, other: BigUint) -> BigUint {
        div_rem(self, other).0
    }
}
impl Div<&BigUint> for &BigUint {
    type Output = BigUint;

    /// Floor division by reference; the remainder is discarded.
    #[inline]
    fn div(self, other: &BigUint) -> BigUint {
        self.div_rem(other).0
    }
}
impl DivAssign<&BigUint> for BigUint {
    #[inline]
    fn div_assign(&mut self, other: &BigUint) {
        // Compute by reference, then replace self with the quotient.
        let quotient = &*self / other;
        *self = quotient;
    }
}
// Presumably these project macros promote the small unsigned scalar types
// (u8/u16/usize) to the u32/u64 impls, and derive the remaining by-ref
// operand combinations for the scalar Div impls below.
promote_unsigned_scalars!(impl Div for BigUint, div);
promote_unsigned_scalars_assign!(impl DivAssign for BigUint, div_assign);
forward_all_scalar_binop_to_val_val!(impl Div<u32> for BigUint, div);
forward_all_scalar_binop_to_val_val!(impl Div<u64> for BigUint, div);
forward_all_scalar_binop_to_val_val!(impl Div<u128> for BigUint, div);
impl Div<u32> for BigUint {
    type Output = BigUint;

    /// Divide by a single machine digit; the remainder is discarded.
    #[inline]
    fn div(self, other: u32) -> BigUint {
        div_rem_digit(self, other as BigDigit).0
    }
}
impl DivAssign<u32> for BigUint {
#[inline]
fn div_assign(&mut self, other: u32) {
*self = &*self / other;
}
}
impl Div<BigUint> for u32 {
    type Output = BigUint;

    /// A small scalar divided by a big integer: the quotient is nonzero
    /// only when the divisor fits in a single digit.
    #[inline]
    fn div(self, other: BigUint) -> BigUint {
        match other.data.as_slice() {
            [] => panic!("attempt to divide by zero"),
            [digit] => From::from(self as BigDigit / *digit),
            _ => BigUint::ZERO,
        }
    }
}
impl Div<u64> for BigUint {
    type Output = BigUint;

    /// Divide by a `u64` scalar, promoting it to a `BigUint` divisor.
    #[inline]
    fn div(self, other: u64) -> BigUint {
        div_rem(self, From::from(other)).0
    }
}
impl DivAssign<u64> for BigUint {
    /// In-place division by a `u64`; takes ownership of the digit buffer
    /// to avoid the clone a by-ref division would incur.
    #[inline]
    fn div_assign(&mut self, other: u64) {
        // a vec of size 0 does not allocate, so this is fairly cheap
        let temp = mem::replace(self, Self::ZERO);
        *self = temp / other;
    }
}
impl Div<BigUint> for u64 {
    type Output = BigUint;
    // cfg_digit! presumably selects the first body for 32-bit digits (where
    // a u64 divisor can span up to two digits) and the second for 64-bit
    // digits (one digit). Larger divisors always yield a zero quotient.
    cfg_digit!(
        #[inline]
        fn div(self, other: BigUint) -> BigUint {
            match other.data.len() {
                0 => panic!("attempt to divide by zero"),
                1 => From::from(self / u64::from(other.data[0])),
                2 => From::from(self / big_digit::to_doublebigdigit(other.data[1], other.data[0])),
                _ => BigUint::ZERO,
            }
        }
        #[inline]
        fn div(self, other: BigUint) -> BigUint {
            match other.data.len() {
                0 => panic!("attempt to divide by zero"),
                1 => From::from(self / other.data[0]),
                _ => BigUint::ZERO,
            }
        }
    );
}
impl Div<u128> for BigUint {
    type Output = BigUint;

    /// Divide by a `u128` scalar, promoting it to a `BigUint` divisor.
    #[inline]
    fn div(self, other: u128) -> BigUint {
        div_rem(self, From::from(other)).0
    }
}
impl DivAssign<u128> for BigUint {
#[inline]
fn div_assign(&mut self, other: u128) {
*self = &*self / other;
}
}
impl Div<BigUint> for u128 {
    type Output = BigUint;
    // cfg_digit! presumably selects the first body for 32-bit digits (where
    // a u128 divisor can span up to four digits) and the second for 64-bit
    // digits (up to two). Larger divisors always yield a zero quotient.
    cfg_digit!(
        #[inline]
        fn div(self, other: BigUint) -> BigUint {
            use super::u32_to_u128;
            match other.data.len() {
                0 => panic!("attempt to divide by zero"),
                1 => From::from(self / u128::from(other.data[0])),
                2 => From::from(
                    self / u128::from(big_digit::to_doublebigdigit(other.data[1], other.data[0])),
                ),
                3 => From::from(self / u32_to_u128(0, other.data[2], other.data[1], other.data[0])),
                4 => From::from(
                    self / u32_to_u128(other.data[3], other.data[2], other.data[1], other.data[0]),
                ),
                _ => BigUint::ZERO,
            }
        }
        #[inline]
        fn div(self, other: BigUint) -> BigUint {
            match other.data.len() {
                0 => panic!("attempt to divide by zero"),
                1 => From::from(self / other.data[0] as u128),
                2 => From::from(self / big_digit::to_doublebigdigit(other.data[1], other.data[0])),
                _ => BigUint::ZERO,
            }
        }
    );
}
// Forwarding boilerplate: route the remaining owned/borrowed `Rem`
// combinations through the core implementations below.
forward_val_ref_binop!(impl Rem for BigUint, rem);
forward_ref_val_binop!(impl Rem for BigUint, rem);
forward_val_assign!(impl RemAssign for BigUint, rem_assign);
impl Rem<BigUint> for BigUint {
type Output = BigUint;
#[inline]
fn rem(self, other: BigUint) -> BigUint {
if let Some(other) = other.to_u32() {
&self % other
} else {
let (_, r) = div_rem(self, other);
r
}
}
}
impl Rem<&BigUint> for &BigUint {
type Output = BigUint;
#[inline]
fn rem(self, other: &BigUint) -> BigUint {
if let Some(other) = other.to_u32() {
self % other
} else {
let (_, r) = self.div_rem(other);
r
}
}
}
impl RemAssign<&BigUint> for BigUint {
#[inline]
fn rem_assign(&mut self, other: &BigUint) {
*self = &*self % other;
}
}
// Forwarding boilerplate for scalar `%`: promote the smaller unsigned
// operand types and route every owned/borrowed combination to the u32/u64/
// u128 implementations below.
promote_unsigned_scalars!(impl Rem for BigUint, rem);
promote_unsigned_scalars_assign!(impl RemAssign for BigUint, rem_assign);
forward_all_scalar_binop_to_ref_val!(impl Rem<u32> for BigUint, rem);
forward_all_scalar_binop_to_val_val!(impl Rem<u64> for BigUint, rem);
forward_all_scalar_binop_to_val_val!(impl Rem<u128> for BigUint, rem);
impl Rem<u32> for &BigUint {
type Output = BigUint;
#[inline]
fn rem(self, other: u32) -> BigUint {
rem_digit(self, other as BigDigit).into()
}
}
impl RemAssign<u32> for BigUint {
#[inline]
fn rem_assign(&mut self, other: u32) {
*self = &*self % other;
}
}
impl Rem<&BigUint> for u32 {
type Output = BigUint;
#[inline]
fn rem(mut self, other: &BigUint) -> BigUint {
self %= other;
From::from(self)
}
}
// Implements `scalar %= &BigUint` (and, via the forwarding macro, the
// by-value form) for a primitive integer type.
//
// Semantics: if the `BigUint` does not fit in the scalar type it must
// exceed the scalar's magnitude, so the scalar is already fully reduced
// and is left unchanged. A zero modulus panics, matching primitive `%`.
macro_rules! impl_rem_assign_scalar {
    ($scalar:ty, $to_scalar:ident) => {
        forward_val_assign_scalar!(impl RemAssign for BigUint, $scalar, rem_assign);
        impl RemAssign<&BigUint> for $scalar {
            #[inline]
            fn rem_assign(&mut self, other: &BigUint) {
                *self = match other.$to_scalar() {
                    None => *self,
                    Some(0) => panic!("attempt to divide by zero"),
                    Some(v) => *self % v
                };
            }
        }
    }
}

// we can scalar %= BigUint for any scalar, including signed types
impl_rem_assign_scalar!(u128, to_u128);
impl_rem_assign_scalar!(usize, to_usize);
impl_rem_assign_scalar!(u64, to_u64);
impl_rem_assign_scalar!(u32, to_u32);
impl_rem_assign_scalar!(u16, to_u16);
impl_rem_assign_scalar!(u8, to_u8);
impl_rem_assign_scalar!(i128, to_i128);
impl_rem_assign_scalar!(isize, to_isize);
impl_rem_assign_scalar!(i64, to_i64);
impl_rem_assign_scalar!(i32, to_i32);
impl_rem_assign_scalar!(i16, to_i16);
impl_rem_assign_scalar!(i8, to_i8);
impl Rem<u64> for BigUint {
type Output = BigUint;
#[inline]
fn rem(self, other: u64) -> BigUint {
let (_, r) = div_rem(self, From::from(other));
r
}
}
impl RemAssign<u64> for BigUint {
#[inline]
fn rem_assign(&mut self, other: u64) {
*self = &*self % other;
}
}
impl Rem<BigUint> for u64 {
type Output = BigUint;
#[inline]
fn rem(mut self, other: BigUint) -> BigUint {
self %= other;
From::from(self)
}
}
impl Rem<u128> for BigUint {
type Output = BigUint;
#[inline]
fn rem(self, other: u128) -> BigUint {
let (_, r) = div_rem(self, From::from(other));
r
}
}
impl RemAssign<u128> for BigUint {
#[inline]
fn rem_assign(&mut self, other: u128) {
*self = &*self % other;
}
}
impl Rem<BigUint> for u128 {
type Output = BigUint;
#[inline]
fn rem(mut self, other: BigUint) -> BigUint {
self %= other;
From::from(self)
}
}
impl CheckedDiv for BigUint {
    /// Division that returns `None` instead of panicking on a zero divisor.
    #[inline]
    fn checked_div(&self, v: &BigUint) -> Option<BigUint> {
        if v.is_zero() {
            None
        } else {
            Some(self / v)
        }
    }
}
impl CheckedEuclid for BigUint {
    /// Euclidean division that returns `None` on a zero divisor.
    #[inline]
    fn checked_div_euclid(&self, v: &BigUint) -> Option<BigUint> {
        if v.is_zero() {
            return None;
        }
        Some(self.div_euclid(v))
    }

    /// Euclidean remainder that returns `None` on a zero divisor.
    #[inline]
    fn checked_rem_euclid(&self, v: &BigUint) -> Option<BigUint> {
        if v.is_zero() {
            return None;
        }
        Some(self.rem_euclid(v))
    }

    /// Combined Euclidean quotient and remainder.
    ///
    /// Fix: this previously forwarded unconditionally, so a zero divisor
    /// panicked inside `div_rem_euclid` instead of returning `None`,
    /// violating the `checked_*` contract honored by the two methods above.
    fn checked_div_rem_euclid(&self, v: &Self) -> Option<(Self, Self)> {
        if v.is_zero() {
            return None;
        }
        Some(self.div_rem_euclid(v))
    }
}
impl Euclid for BigUint {
#[inline]
fn div_euclid(&self, v: &BigUint) -> BigUint {
// trivially same as regular division
self / v
}
#[inline]
fn rem_euclid(&self, v: &BigUint) -> BigUint {
// trivially same as regular remainder
self % v
}
fn div_rem_euclid(&self, v: &Self) -> (Self, Self) {
// trivially same as regular division and remainder
self.div_rem(v)
}
}

361
vendor/num-bigint/src/biguint/iter.rs vendored Normal file
View File

@@ -0,0 +1,361 @@
use core::iter::FusedIterator;
cfg_digit!(
    /// An iterator of `u32` digits representation of a `BigUint` or `BigInt`,
    /// ordered least significant digit first.
    // Digit size u32: each stored digit already is a `u32`, so this simply
    // wraps a slice iterator.
    pub struct U32Digits<'a> {
        it: core::slice::Iter<'a, u32>,
    }

    /// An iterator of `u32` digits representation of a `BigUint` or `BigInt`,
    /// ordered least significant digit first.
    // Digit size u64: each stored digit yields two `u32` halves (low first),
    // so we track which half of the front digit comes next, and whether the
    // final digit's high half is a leading zero that must be suppressed.
    pub struct U32Digits<'a> {
        data: &'a [u64],
        next_is_lo: bool,
        last_hi_is_zero: bool,
    }
);
cfg_digit!(
    // Digit size u32: every method forwards directly to the slice iterator.
    const _: () = {
        impl<'a> U32Digits<'a> {
            #[inline]
            pub(super) fn new(data: &'a [u32]) -> Self {
                Self { it: data.iter() }
            }
        }

        impl Iterator for U32Digits<'_> {
            type Item = u32;
            #[inline]
            fn next(&mut self) -> Option<u32> {
                self.it.next().cloned()
            }

            #[inline]
            fn size_hint(&self) -> (usize, Option<usize>) {
                self.it.size_hint()
            }

            #[inline]
            fn nth(&mut self, n: usize) -> Option<u32> {
                self.it.nth(n).cloned()
            }

            #[inline]
            fn last(self) -> Option<u32> {
                self.it.last().cloned()
            }

            #[inline]
            fn count(self) -> usize {
                self.it.count()
            }
        }

        impl DoubleEndedIterator for U32Digits<'_> {
            fn next_back(&mut self) -> Option<Self::Item> {
                self.it.next_back().cloned()
            }
        }

        impl ExactSizeIterator for U32Digits<'_> {
            #[inline]
            fn len(&self) -> usize {
                self.it.len()
            }
        }
    };

    // Digit size u64: each u64 digit is emitted as two u32 halves, low half
    // first. `next_is_lo` marks which half of `data[0]` is due at the front;
    // `last_hi_is_zero` records that the final digit's high half is zero and
    // must not be yielded (no leading-zero digit).
    const _: () = {
        impl<'a> U32Digits<'a> {
            #[inline]
            pub(super) fn new(data: &'a [u64]) -> Self {
                // Check up front whether the most significant u32 half is a
                // zero that has to be suppressed.
                let last_hi_is_zero = data
                    .last()
                    .map(|&last| {
                        let last_hi = (last >> 32) as u32;
                        last_hi == 0
                    })
                    .unwrap_or(false);
                U32Digits {
                    data,
                    next_is_lo: true,
                    last_hi_is_zero,
                }
            }
        }

        impl Iterator for U32Digits<'_> {
            type Item = u32;
            #[inline]
            fn next(&mut self) -> Option<u32> {
                match self.data.split_first() {
                    Some((&first, data)) => {
                        let next_is_lo = self.next_is_lo;
                        self.next_is_lo = !next_is_lo;
                        if next_is_lo {
                            // Low half: keep the digit; its high half is next.
                            Some(first as u32)
                        } else {
                            // High half: the front digit is now consumed.
                            self.data = data;
                            if data.is_empty() && self.last_hi_is_zero {
                                // This was the last digit and its high half is
                                // a suppressed leading zero.
                                self.last_hi_is_zero = false;
                                None
                            } else {
                                Some((first >> 32) as u32)
                            }
                        }
                    }
                    None => None,
                }
            }

            #[inline]
            fn size_hint(&self) -> (usize, Option<usize>) {
                // `len` is exact, so lower and upper bounds coincide.
                let len = self.len();
                (len, Some(len))
            }

            #[inline]
            fn last(self) -> Option<u32> {
                // The final u32 is the low half when the high half is a
                // suppressed zero, otherwise the high half.
                self.data.last().map(|&last| {
                    if self.last_hi_is_zero {
                        last as u32
                    } else {
                        (last >> 32) as u32
                    }
                })
            }

            #[inline]
            fn count(self) -> usize {
                self.len()
            }
        }

        impl DoubleEndedIterator for U32Digits<'_> {
            fn next_back(&mut self) -> Option<Self::Item> {
                match self.data.split_last() {
                    Some((&last, data)) => {
                        // `last_hi_is_zero` doubles as "the back digit's high
                        // half has already been yielded (or suppressed)".
                        let last_is_lo = self.last_hi_is_zero;
                        self.last_hi_is_zero = !last_is_lo;
                        if last_is_lo {
                            // Low half: the back digit is now consumed.
                            self.data = data;
                            if data.is_empty() && !self.next_is_lo {
                                // The front cursor had already taken this low
                                // half; the iterator is exhausted.
                                self.next_is_lo = true;
                                None
                            } else {
                                Some(last as u32)
                            }
                        } else {
                            Some((last >> 32) as u32)
                        }
                    }
                    None => None,
                }
            }
        }

        impl ExactSizeIterator for U32Digits<'_> {
            #[inline]
            fn len(&self) -> usize {
                // Two halves per digit, minus the suppressed/consumed high
                // half at the back and the consumed low half at the front.
                self.data.len() * 2
                    - usize::from(self.last_hi_is_zero)
                    - usize::from(!self.next_is_lo)
            }
        }
    };
);
// Both digit-size variants keep returning `None` once exhausted.
impl FusedIterator for U32Digits<'_> {}
cfg_digit!(
    /// An iterator of `u64` digits representation of a `BigUint` or `BigInt`,
    /// ordered least significant digit first.
    // Digit size u32: pairs of u32 digits are combined into one u64 each.
    pub struct U64Digits<'a> {
        it: core::slice::Chunks<'a, u32>,
    }

    /// An iterator of `u64` digits representation of a `BigUint` or `BigInt`,
    /// ordered least significant digit first.
    // Digit size u64: each stored digit already is a `u64`, so this simply
    // wraps a slice iterator.
    pub struct U64Digits<'a> {
        it: core::slice::Iter<'a, u64>,
    }
);
cfg_digit!(
    // Digit size u32: combine u32 digits two at a time (low half first).
    const _: () = {
        impl<'a> U64Digits<'a> {
            #[inline]
            pub(super) fn new(data: &'a [u32]) -> Self {
                U64Digits { it: data.chunks(2) }
            }
        }

        impl Iterator for U64Digits<'_> {
            type Item = u64;
            #[inline]
            fn next(&mut self) -> Option<u64> {
                // Each chunk holds one or two u32s; a lone trailing digit
                // becomes the low half of the final u64.
                self.it.next().map(super::u32_chunk_to_u64)
            }

            #[inline]
            fn size_hint(&self) -> (usize, Option<usize>) {
                let len = self.len();
                (len, Some(len))
            }

            #[inline]
            fn last(self) -> Option<u64> {
                self.it.last().map(super::u32_chunk_to_u64)
            }

            #[inline]
            fn count(self) -> usize {
                self.len()
            }
        }

        impl DoubleEndedIterator for U64Digits<'_> {
            fn next_back(&mut self) -> Option<Self::Item> {
                self.it.next_back().map(super::u32_chunk_to_u64)
            }
        }
    };

    // Digit size u64: every method forwards directly to the slice iterator.
    const _: () = {
        impl<'a> U64Digits<'a> {
            #[inline]
            pub(super) fn new(data: &'a [u64]) -> Self {
                Self { it: data.iter() }
            }
        }

        impl Iterator for U64Digits<'_> {
            type Item = u64;
            #[inline]
            fn next(&mut self) -> Option<u64> {
                self.it.next().cloned()
            }

            #[inline]
            fn size_hint(&self) -> (usize, Option<usize>) {
                self.it.size_hint()
            }

            #[inline]
            fn nth(&mut self, n: usize) -> Option<u64> {
                self.it.nth(n).cloned()
            }

            #[inline]
            fn last(self) -> Option<u64> {
                self.it.last().cloned()
            }

            #[inline]
            fn count(self) -> usize {
                self.it.count()
            }
        }

        impl DoubleEndedIterator for U64Digits<'_> {
            fn next_back(&mut self) -> Option<Self::Item> {
                self.it.next_back().cloned()
            }
        }
    };
);
// Shared between both digit sizes: `Chunks` and `Iter` are both exact-size.
impl ExactSizeIterator for U64Digits<'_> {
    #[inline]
    fn len(&self) -> usize {
        self.it.len()
    }
}

// Both variants keep returning `None` once exhausted.
impl FusedIterator for U64Digits<'_> {}
#[test]
fn test_iter_u32_digits() {
    // A value that fits in a single u32 digit.
    let small = super::BigUint::from(5u8);
    let mut digits = small.iter_u32_digits();
    assert_eq!(digits.len(), 1);
    assert_eq!(digits.next(), Some(5));
    // Once exhausted, the iterator keeps reporting empty.
    for _ in 0..2 {
        assert_eq!(digits.len(), 0);
        assert_eq!(digits.next(), None);
    }

    // 112500000000 = 26 * 2^32 + 830850304 spans two u32 digits.
    let big = super::BigUint::from(112500000000u64);
    let mut digits = big.iter_u32_digits();
    assert_eq!(digits.len(), 2);
    assert_eq!(digits.next(), Some(830850304));
    assert_eq!(digits.len(), 1);
    assert_eq!(digits.next(), Some(26));
    assert_eq!(digits.len(), 0);
    assert_eq!(digits.next(), None);
}

#[test]
fn test_iter_u64_digits() {
    // A value that fits in a single u64 digit.
    let small = super::BigUint::from(5u8);
    let mut digits = small.iter_u64_digits();
    assert_eq!(digits.len(), 1);
    assert_eq!(digits.next(), Some(5));
    // Once exhausted, the iterator keeps reporting empty.
    for _ in 0..2 {
        assert_eq!(digits.len(), 0);
        assert_eq!(digits.next(), None);
    }

    // 2^64 = 1 * 2^64 + 0 spans two u64 digits.
    let big = super::BigUint::from(18_446_744_073_709_551_616u128);
    let mut digits = big.iter_u64_digits();
    assert_eq!(digits.len(), 2);
    assert_eq!(digits.next(), Some(0));
    assert_eq!(digits.len(), 1);
    assert_eq!(digits.next(), Some(1));
    assert_eq!(digits.len(), 0);
    assert_eq!(digits.next(), None);
}
/// Back-to-front iteration over u32 digits via `DoubleEndedIterator`.
///
/// Fix: this test previously duplicated `test_iter_u32_digits` verbatim and
/// never called `next_back()`, so the reverse path it is named for was not
/// exercised at all — unlike its `u64` counterpart.
#[test]
fn test_iter_u32_digits_be() {
    let n = super::BigUint::from(5u8);
    let mut it = n.iter_u32_digits();
    assert_eq!(it.len(), 1);
    assert_eq!(it.next_back(), Some(5));
    assert_eq!(it.len(), 0);
    assert_eq!(it.next_back(), None);
    assert_eq!(it.len(), 0);
    assert_eq!(it.next(), None);

    // 112500000000 = 26 * 2^32 + 830850304: most significant digit first.
    let n = super::BigUint::from(112500000000u64);
    let mut it = n.iter_u32_digits();
    assert_eq!(it.len(), 2);
    assert_eq!(it.next_back(), Some(26));
    assert_eq!(it.len(), 1);
    assert_eq!(it.next_back(), Some(830850304));
    assert_eq!(it.len(), 0);
    assert_eq!(it.next(), None);
}
#[test]
fn test_iter_u64_digits_be() {
    // Single digit: front and back are the same element.
    let small = super::BigUint::from(5u8);
    let mut digits = small.iter_u64_digits();
    assert_eq!(digits.len(), 1);
    assert_eq!(digits.next_back(), Some(5));
    // Once exhausted, the iterator keeps reporting empty.
    for _ in 0..2 {
        assert_eq!(digits.len(), 0);
        assert_eq!(digits.next(), None);
    }

    // 2^64 = 1 * 2^64 + 0: most significant digit first.
    let big = super::BigUint::from(18_446_744_073_709_551_616u128);
    let mut digits = big.iter_u64_digits();
    assert_eq!(digits.len(), 2);
    assert_eq!(digits.next_back(), Some(1));
    assert_eq!(digits.len(), 1);
    assert_eq!(digits.next_back(), Some(0));
    assert_eq!(digits.len(), 0);
    assert_eq!(digits.next(), None);
}

226
vendor/num-bigint/src/biguint/monty.rs vendored Normal file
View File

@@ -0,0 +1,226 @@
use alloc::vec::Vec;
use core::mem;
use core::ops::Shl;
use num_traits::One;
use crate::big_digit::{self, BigDigit, DoubleBigDigit};
use crate::biguint::BigUint;
// Precomputed constant for Montgomery reduction modulo `n`.
struct MontyReducer {
    // n0inv = -n.data[0]^(-1) mod 2^BITS, the per-digit reduction multiplier.
    n0inv: BigDigit,
}

// k0 = -m**-1 mod 2**BITS. Algorithm from: Dumas, J.G. "On Newton-Raphson
// Iteration for Multiplicative Inverses Modulo Prime Powers".
fn inv_mod_alt(b: BigDigit) -> BigDigit {
    // An inverse mod 2^BITS exists only for odd `b`.
    assert_ne!(b & 1, 0);

    let mut k0 = BigDigit::wrapping_sub(2, b);
    let mut t = b - 1;
    let mut i = 1;
    // Each step doubles the number of correct low-order bits, so
    // log2(BITS) iterations yield a full-word inverse.
    while i < big_digit::BITS {
        t = t.wrapping_mul(t);
        k0 = k0.wrapping_mul(t + 1);
        i <<= 1;
    }
    debug_assert_eq!(k0.wrapping_mul(b), 1);

    // Return the negated inverse, as required by `montgomery`.
    k0.wrapping_neg()
}

impl MontyReducer {
    fn new(n: &BigUint) -> Self {
        // Only the least significant digit of the (odd) modulus matters.
        let n0inv = inv_mod_alt(n.data[0]);
        MontyReducer { n0inv }
    }
}
/// Computes z mod m = x * y * 2 ** (-n*_W) mod m
/// assuming k = -1/m mod 2**_W
/// See Gueron, "Efficient Software Implementations of Modular Exponentiation".
/// <https://eprint.iacr.org/2011/239.pdf>
/// In the terminology of that paper, this is an "Almost Montgomery Multiplication":
/// x and y are required to satisfy 0 <= z < 2**(n*_W) and then the result
/// z is guaranteed to satisfy 0 <= z < 2**(n*_W), but it may not be < m.
#[allow(clippy::many_single_char_names)]
fn montgomery(x: &BigUint, y: &BigUint, m: &BigUint, k: BigDigit, n: usize) -> BigUint {
    // This code assumes x, y, m are all the same length, n.
    // (required by addMulVVW and the for loop).
    // It also assumes that x, y are already reduced mod m,
    // or else the result will not be properly reduced.
    assert!(
        x.data.len() == n && y.data.len() == n && m.data.len() == n,
        "{:?} {:?} {:?} {}",
        x,
        y,
        m,
        n
    );

    // Double-width accumulator for the interleaved multiply/reduce loop.
    let mut z = BigUint::ZERO;
    z.data.resize(n * 2, 0);

    let mut c: BigDigit = 0;
    for i in 0..n {
        // Accumulate x * y.data[i] into the current window, then add a
        // multiple of m chosen (via k) to zero out digit i of z.
        let c2 = add_mul_vvw(&mut z.data[i..n + i], &x.data, y.data[i]);
        let t = z.data[i].wrapping_mul(k);
        let c3 = add_mul_vvw(&mut z.data[i..n + i], &m.data, t);
        let cx = c.wrapping_add(c2);
        let cy = cx.wrapping_add(c3);
        z.data[n + i] = cy;
        if cx < c2 || cy < c3 {
            c = 1;
        } else {
            c = 0;
        }
    }

    if c == 0 {
        // No overflow: the result is the upper half of z.
        z.data = z.data[n..].to_vec();
    } else {
        // Overflow: store (upper half of z) - m into the lower half,
        // then keep that lower half as the result.
        {
            let (first, second) = z.data.split_at_mut(n);
            sub_vv(first, second, &m.data);
        }
        z.data = z.data[..n].to_vec();
    }

    z
}
// z += x * y over min(z.len(), x.len()) digits; returns the carry-out digit.
#[inline(always)]
fn add_mul_vvw(z: &mut [BigDigit], x: &[BigDigit], y: BigDigit) -> BigDigit {
    let mut c = 0;
    for (zi, xi) in z.iter_mut().zip(x.iter()) {
        // (z1, z0) = xi * y + zi; then fold in the incoming carry.
        let (z1, z0) = mul_add_www(*xi, y, *zi);
        let (c_, zi_) = add_ww(z0, c, 0);
        *zi = zi_;
        c = c_ + z1;
    }
    c
}

// z = x - y (digit-wise with borrow) over z.len() digits.
/// The resulting carry c is either 0 or 1.
#[inline(always)]
fn sub_vv(z: &mut [BigDigit], x: &[BigDigit], y: &[BigDigit]) -> BigDigit {
    let mut c = 0;
    for (i, (xi, yi)) in x.iter().zip(y.iter()).enumerate().take(z.len()) {
        let zi = xi.wrapping_sub(*yi).wrapping_sub(c);
        z[i] = zi;
        // see "Hacker's Delight", section 2-12 (overflow detection)
        c = ((yi & !xi) | ((yi | !xi) & zi)) >> (big_digit::BITS - 1)
    }

    c
}

/// z1<<_W + z0 = x+y+c, with c == 0 or 1
#[inline(always)]
fn add_ww(x: BigDigit, y: BigDigit, c: BigDigit) -> (BigDigit, BigDigit) {
    let yc = y.wrapping_add(c);
    let z0 = x.wrapping_add(yc);
    // Wrapped iff the result is smaller than an addend.
    let z1 = if z0 < x || yc < y { 1 } else { 0 };

    (z1, z0)
}

/// z1 << _W + z0 = x * y + c
#[inline(always)]
fn mul_add_www(x: BigDigit, y: BigDigit, c: BigDigit) -> (BigDigit, BigDigit) {
    // Double-width product cannot overflow: (2^W-1)^2 + (2^W-1) < 2^(2W).
    let z = x as DoubleBigDigit * y as DoubleBigDigit + c as DoubleBigDigit;
    ((z >> big_digit::BITS) as BigDigit, z as BigDigit)
}
/// Calculates x ** y mod m using a fixed, 4-bit window.
#[allow(clippy::many_single_char_names)]
pub(super) fn monty_modpow(x: &BigUint, y: &BigUint, m: &BigUint) -> BigUint {
    // Montgomery reduction requires an odd modulus.
    assert!(m.data[0] & 1 == 1);
    let mr = MontyReducer::new(m);
    let num_words = m.data.len();

    let mut x = x.clone();

    // We want the lengths of x and m to be equal.
    // It is OK if x >= m as long as len(x) == len(m).
    if x.data.len() > num_words {
        x %= m;
        // Note: now len(x) <= numWords, not guaranteed ==.
    }
    if x.data.len() < num_words {
        x.data.resize(num_words, 0);
    }

    // rr = 2**(2*_W*len(m)) mod m
    let mut rr = BigUint::one();
    rr = (rr.shl(2 * num_words as u64 * u64::from(big_digit::BITS))) % m;
    if rr.data.len() < num_words {
        rr.data.resize(num_words, 0);
    }
    // one = 1, with equal length to that of m
    let mut one = BigUint::one();
    one.data.resize(num_words, 0);

    let n = 4;
    // powers[i] contains x^i (in Montgomery form).
    let mut powers = Vec::with_capacity(1 << n);
    powers.push(montgomery(&one, &rr, m, mr.n0inv, num_words));
    powers.push(montgomery(&x, &rr, m, mr.n0inv, num_words));
    for i in 2..1 << n {
        let r = montgomery(&powers[i - 1], &powers[1], m, mr.n0inv, num_words);
        powers.push(r);
    }

    // initialize z = 1 (Montgomery 1)
    let mut z = powers[0].clone();
    z.data.resize(num_words, 0);
    let mut zz = BigUint::ZERO;
    zz.data.resize(num_words, 0);

    // same windowed exponent, but with Montgomery multiplications
    for i in (0..y.data.len()).rev() {
        let mut yi = y.data[i];
        let mut j = 0;
        while j < big_digit::BITS {
            // Shift the accumulator left by one 4-bit window (four
            // squarings), except before the very first window.
            if i != y.data.len() - 1 || j != 0 {
                zz = montgomery(&z, &z, m, mr.n0inv, num_words);
                z = montgomery(&zz, &zz, m, mr.n0inv, num_words);
                zz = montgomery(&z, &z, m, mr.n0inv, num_words);
                z = montgomery(&zz, &zz, m, mr.n0inv, num_words);
            }
            // Multiply in x^(top 4 bits of the current exponent digit).
            zz = montgomery(
                &z,
                &powers[(yi >> (big_digit::BITS - n)) as usize],
                m,
                mr.n0inv,
                num_words,
            );
            mem::swap(&mut z, &mut zz);
            yi <<= n;
            j += n;
        }
    }

    // convert to regular number
    zz = montgomery(&z, &one, m, mr.n0inv, num_words);

    zz.normalize();
    // One last reduction, just in case.
    // See golang.org/issue/13907.
    if zz >= *m {
        // Common case is m has high bit set; in that case,
        // since zz is the same length as m, there can be just
        // one multiple of m to remove. Just subtract.
        // We think that the subtract should be sufficient in general,
        // so do that unconditionally, but double-check,
        // in case our beliefs are wrong.
        // The div is not expected to be reached.
        zz -= m;
        if zz >= *m {
            zz %= m;
        }
    }

    zz.normalize();
    zz
}

View File

@@ -0,0 +1,626 @@
use super::addition::{__add2, add2};
use super::subtraction::sub2;
use super::{biguint_from_vec, cmp_slice, BigUint, IntDigits};
use crate::big_digit::{self, BigDigit, DoubleBigDigit};
use crate::Sign::{self, Minus, NoSign, Plus};
use crate::{BigInt, UsizePromotion};
use core::cmp::Ordering;
use core::iter::Product;
use core::ops::{Mul, MulAssign};
use num_traits::{CheckedMul, FromPrimitive, One, Zero};
/// Multiply–accumulate one digit column: returns the low digit of
/// `a + b * c + *acc` and leaves the high part in `*acc` for the next
/// column. The double-width accumulator cannot overflow: even with every
/// input at its maximum, the sum still fits in a `DoubleBigDigit`.
#[inline]
pub(super) fn mac_with_carry(
    a: BigDigit,
    b: BigDigit,
    c: BigDigit,
    acc: &mut DoubleBigDigit,
) -> BigDigit {
    *acc += DoubleBigDigit::from(b) * DoubleBigDigit::from(c) + DoubleBigDigit::from(a);
    let low = *acc as BigDigit;
    *acc >>= big_digit::BITS;
    low
}

/// Like [`mac_with_carry`] without the addend: returns the low digit of
/// `a * b + *acc`, leaving the carry in `*acc`.
#[inline]
fn mul_with_carry(a: BigDigit, b: BigDigit, acc: &mut DoubleBigDigit) -> BigDigit {
    *acc += DoubleBigDigit::from(a) * DoubleBigDigit::from(b);
    let low = *acc as BigDigit;
    *acc >>= big_digit::BITS;
    low
}
/// Three argument multiply accumulate:
/// acc += b * c
fn mac_digit(acc: &mut [BigDigit], b: &[BigDigit], c: BigDigit) {
    if c == 0 {
        return;
    }

    let mut carry = 0;
    // a_lo receives the per-digit products; a_hi absorbs the final carry.
    let (a_lo, a_hi) = acc.split_at_mut(b.len());

    for (a, &b) in a_lo.iter_mut().zip(b) {
        *a = mac_with_carry(*a, b, c, &mut carry);
    }

    // Propagate whatever carry remains into the upper digits of `acc`.
    let (carry_hi, carry_lo) = big_digit::from_doublebigdigit(carry);

    let final_carry = if carry_hi == 0 {
        __add2(a_hi, &[carry_lo])
    } else {
        __add2(a_hi, &[carry_hi, carry_lo])
    };

    // `acc` must be sized by the caller so the product always fits.
    assert_eq!(final_carry, 0, "carry overflow during multiplication!");
}

// View a digit slice as a (non-negative) `BigInt`, copying the digits.
fn bigint_from_slice(slice: &[BigDigit]) -> BigInt {
    BigInt::from(biguint_from_vec(slice.to_vec()))
}
/// Three argument multiply accumulate:
/// acc += b * c
#[allow(clippy::many_single_char_names)]
fn mac3(mut acc: &mut [BigDigit], mut b: &[BigDigit], mut c: &[BigDigit]) {
    // Least-significant zeros have no effect on the output.
    if let Some(&0) = b.first() {
        if let Some(nz) = b.iter().position(|&d| d != 0) {
            b = &b[nz..];
            acc = &mut acc[nz..];
        } else {
            return;
        }
    }
    if let Some(&0) = c.first() {
        if let Some(nz) = c.iter().position(|&d| d != 0) {
            c = &c[nz..];
            acc = &mut acc[nz..];
        } else {
            return;
        }
    }

    let acc = acc;
    // Ensure x is the shorter of the two operands.
    let (x, y) = if b.len() < c.len() { (b, c) } else { (c, b) };

    // We use four algorithms for different input sizes.
    //
    // - For small inputs, long multiplication is fastest.
    // - If y is at least twice as long as x, split using Half-Karatsuba.
    // - Next we use Karatsuba multiplication (Toom-2), which we have optimized
    //   to avoid unnecessary allocations for intermediate values.
    // - For the largest inputs we use Toom-3, which better optimizes the
    //   number of operations, but uses more temporary allocations.
    //
    // The thresholds are somewhat arbitrary, chosen by evaluating the results
    // of `cargo bench --bench bigint multiply`.

    if x.len() <= 32 {
        // Long multiplication:
        for (i, xi) in x.iter().enumerate() {
            mac_digit(&mut acc[i..], y, *xi);
        }
    } else if x.len() * 2 <= y.len() {
        // Karatsuba Multiplication for factors with significant length disparity.
        //
        // The Half-Karatsuba Multiplication Algorithm is a specialized case of
        // the normal Karatsuba multiplication algorithm, designed for the scenario
        // where y has at least twice as many base digits as x.
        //
        // In this case y (the longer input) is split into high2 and low2,
        // at m2 (half the length of y) and x (the shorter input),
        // is used directly without splitting.
        //
        // The algorithm then proceeds as follows:
        //
        // 1. Compute the product z0 = x * low2.
        // 2. Compute the product temp = x * high2.
        // 3. Adjust the weight of temp by adding m2 (* NBASE ^ m2)
        // 4. Add temp and z0 to obtain the final result.
        //
        // Proof:
        //
        // The algorithm can be derived from the original Karatsuba algorithm by
        // simplifying the formula when the shorter factor x is not split into
        // high and low parts, as shown below.
        //
        // Original Karatsuba formula:
        //
        //     result = (z2 * NBASE ^ (m2 × 2)) + ((z1 - z2 - z0) * NBASE ^ m2) + z0
        //
        // Substitutions:
        //
        //     low1 = x
        //     high1 = 0
        //
        // Applying substitutions:
        //
        //     z0 = (low1 * low2)
        //        = (x * low2)
        //
        //     z1 = ((low1 + high1) * (low2 + high2))
        //        = ((x + 0) * (low2 + high2))
        //        = (x * low2) + (x * high2)
        //
        //     z2 = (high1 * high2)
        //        = (0 * high2)
        //        = 0
        //
        // Simplified using the above substitutions:
        //
        //     result = (z2 * NBASE ^ (m2 × 2)) + ((z1 - z2 - z0) * NBASE ^ m2) + z0
        //            = (0 * NBASE ^ (m2 × 2)) + ((z1 - 0 - z0) * NBASE ^ m2) + z0
        //            = ((z1 - z0) * NBASE ^ m2) + z0
        //            = ((z1 - z0) * NBASE ^ m2) + z0
        //            = (x * high2) * NBASE ^ m2 + z0
        let m2 = y.len() / 2;
        let (low2, high2) = y.split_at(m2);

        // (x * high2) * NBASE ^ m2 + z0
        mac3(acc, x, low2);
        mac3(&mut acc[m2..], x, high2);
    } else if x.len() <= 256 {
        // Karatsuba multiplication:
        //
        // The idea is that we break x and y up into two smaller numbers that each have about half
        // as many digits, like so (note that multiplying by b is just a shift):
        //
        // x = x0 + x1 * b
        // y = y0 + y1 * b
        //
        // With some algebra, we can compute x * y with three smaller products, where the inputs to
        // each of the smaller products have only about half as many digits as x and y:
        //
        // x * y = (x0 + x1 * b) * (y0 + y1 * b)
        //
        // x * y = x0 * y0
        //       + x0 * y1 * b
        //       + x1 * y0 * b
        //       + x1 * y1 * b^2
        //
        // Let p0 = x0 * y0 and p2 = x1 * y1:
        //
        // x * y = p0
        //       + (x0 * y1 + x1 * y0) * b
        //       + p2 * b^2
        //
        // The real trick is that middle term:
        //
        //         x0 * y1 + x1 * y0
        //
        //       = x0 * y1 + x1 * y0 - p0 + p0 - p2 + p2
        //
        //       = x0 * y1 + x1 * y0 - x0 * y0 - x1 * y1 + p0 + p2
        //
        // Now we complete the square:
        //
        //       = -(x0 * y0 - x0 * y1 - x1 * y0 + x1 * y1) + p0 + p2
        //
        //       = -((x1 - x0) * (y1 - y0)) + p0 + p2
        //
        // Let p1 = (x1 - x0) * (y1 - y0), and substitute back into our original formula:
        //
        // x * y = p0
        //       + (p0 + p2 - p1) * b
        //       + p2 * b^2
        //
        // Where the three intermediate products are:
        //
        // p0 = x0 * y0
        // p1 = (x1 - x0) * (y1 - y0)
        // p2 = x1 * y1
        //
        // In doing the computation, we take great care to avoid unnecessary temporary variables
        // (since creating a BigUint requires a heap allocation): thus, we rearrange the formula a
        // bit so we can use the same temporary variable for all the intermediate products:
        //
        // x * y = p2 * b^2 + p2 * b
        //       + p0 * b + p0
        //       - p1 * b
        //
        // The other trick we use is instead of doing explicit shifts, we slice acc at the
        // appropriate offset when doing the add.

        // When x is smaller than y, it's significantly faster to pick b such that x is split in
        // half, not y:
        let b = x.len() / 2;
        let (x0, x1) = x.split_at(b);
        let (y0, y1) = y.split_at(b);

        // We reuse the same BigUint for all the intermediate multiplies and have to size p
        // appropriately here: x1.len() >= x0.len and y1.len() >= y0.len():
        let len = x1.len() + y1.len() + 1;
        let mut p = BigUint { data: vec![0; len] };

        // p2 = x1 * y1
        mac3(&mut p.data, x1, y1);

        // Not required, but the adds go faster if we drop any unneeded 0s from the end:
        p.normalize();

        add2(&mut acc[b..], &p.data);
        add2(&mut acc[b * 2..], &p.data);

        // Zero out p before the next multiply:
        p.data.truncate(0);
        p.data.resize(len, 0);

        // p0 = x0 * y0
        mac3(&mut p.data, x0, y0);
        p.normalize();

        add2(acc, &p.data);
        add2(&mut acc[b..], &p.data);

        // p1 = (x1 - x0) * (y1 - y0)
        // We do this one last, since it may be negative and acc can't ever be negative:
        let (j0_sign, j0) = sub_sign(x1, x0);
        let (j1_sign, j1) = sub_sign(y1, y0);

        match j0_sign * j1_sign {
            Plus => {
                // (x1 - x0) and (y1 - y0) share a sign, so p1 >= 0 and must
                // be subtracted from acc.
                p.data.truncate(0);
                p.data.resize(len, 0);

                mac3(&mut p.data, &j0.data, &j1.data);
                p.normalize();

                sub2(&mut acc[b..], &p.data);
            }
            Minus => {
                // p1 < 0, so -p1 is added directly into acc.
                mac3(&mut acc[b..], &j0.data, &j1.data);
            }
            NoSign => (),
        }
    } else {
        // Toom-3 multiplication:
        //
        // Toom-3 is like Karatsuba above, but dividing the inputs into three parts.
        // Both are instances of Toom-Cook, using `k=3` and `k=2` respectively.
        //
        // The general idea is to treat the large integers digits as
        // polynomials of a certain degree and determine the coefficients/digits
        // of the product of the two via interpolation of the polynomial product.
        let i = y.len() / 3 + 1;

        let x0_len = Ord::min(x.len(), i);
        let x1_len = Ord::min(x.len() - x0_len, i);

        let y0_len = i;
        let y1_len = Ord::min(y.len() - y0_len, i);

        // Break x and y into three parts, representing an order two polynomial.
        // t is chosen to be the size of a digit so we can use faster shifts
        // in place of multiplications.
        //
        // x(t) = x2*t^2 + x1*t + x0
        let x0 = bigint_from_slice(&x[..x0_len]);
        let x1 = bigint_from_slice(&x[x0_len..x0_len + x1_len]);
        let x2 = bigint_from_slice(&x[x0_len + x1_len..]);

        // y(t) = y2*t^2 + y1*t + y0
        let y0 = bigint_from_slice(&y[..y0_len]);
        let y1 = bigint_from_slice(&y[y0_len..y0_len + y1_len]);
        let y2 = bigint_from_slice(&y[y0_len + y1_len..]);

        // Let w(t) = x(t) * y(t)
        //
        // This gives us the following order-4 polynomial.
        //
        // w(t) = w4*t^4 + w3*t^3 + w2*t^2 + w1*t + w0
        //
        // We need to find the coefficients w4, w3, w2, w1 and w0. Instead
        // of simply multiplying the x and y in total, we can evaluate w
        // at 5 points. An n-degree polynomial is uniquely identified by (n + 1)
        // points.
        //
        // It is arbitrary as to what points we evaluate w at but we use the
        // following.
        //
        // w(t) at t = 0, 1, -1, -2 and inf
        //
        // The values for w(t) in terms of x(t)*y(t) at these points are:
        //
        // let a = w(0)   = x0 * y0
        // let b = w(1)   = (x2 + x1 + x0) * (y2 + y1 + y0)
        // let c = w(-1)  = (x2 - x1 + x0) * (y2 - y1 + y0)
        // let d = w(-2)  = (4*x2 - 2*x1 + x0) * (4*y2 - 2*y1 + y0)
        // let e = w(inf) = x2 * y2 as t -> inf

        // x0 + x2, avoiding temporaries
        let p = &x0 + &x2;
        // y0 + y2, avoiding temporaries
        let q = &y0 + &y2;
        // x2 - x1 + x0, avoiding temporaries
        let p2 = &p - &x1;
        // y2 - y1 + y0, avoiding temporaries
        let q2 = &q - &y1;

        // w(0)
        let r0 = &x0 * &y0;
        // w(inf)
        let r4 = &x2 * &y2;
        // w(1)
        let r1 = (p + x1) * (q + y1);
        // w(-1)
        let r2 = &p2 * &q2;
        // w(-2)
        let r3 = ((p2 + x2) * 2 - x0) * ((q2 + y2) * 2 - y0);

        // Evaluating these points gives us the following system of linear equations.
        //
        //  0  0  0  0  1 | a
        //  1  1  1  1  1 | b
        //  1 -1  1 -1  1 | c
        // 16 -8  4 -2  1 | d
        //  1  0  0  0  0 | e
        //
        // The solved equation (after gaussian elimination or similar)
        // in terms of its coefficients:
        //
        // w0 = w(0)
        // w1 = w(0)/2 + w(1)/3 - w(-1) + w(-2)/6 - 2*w(inf)
        // w2 = -w(0) + w(1)/2 + w(-1)/2 - w(inf)
        // w3 = -w(0)/2 + w(1)/6 + w(-1)/2 - w(-2)/6 + 2*w(inf)
        // w4 = w(inf)
        //
        // This particular sequence is given by Bodrato and is an interpolation
        // of the above equations.
        let mut comp3: BigInt = (r3 - &r1) / 3u32;
        let mut comp1: BigInt = (r1 - &r2) >> 1;
        let mut comp2: BigInt = r2 - &r0;
        comp3 = ((&comp2 - comp3) >> 1) + (&r4 << 1);
        comp2 += &comp1 - &r4;
        comp1 -= &comp3;

        // Recomposition. The coefficients of the polynomial are now known.
        //
        // Evaluate at w(t) where t is our given base to get the result.
        //
        //     let bits = u64::from(big_digit::BITS) * i as u64;
        //     let result = r0
        //         + (comp1 << bits)
        //         + (comp2 << (2 * bits))
        //         + (comp3 << (3 * bits))
        //         + (r4 << (4 * bits));
        //     let result_pos = result.to_biguint().unwrap();
        //     add2(&mut acc[..], &result_pos.data);
        //
        // But with less intermediate copying:
        for (j, result) in [&r0, &comp1, &comp2, &comp3, &r4].iter().enumerate().rev() {
            match result.sign() {
                Plus => add2(&mut acc[i * j..], result.digits()),
                Minus => sub2(&mut acc[i * j..], result.digits()),
                NoSign => {}
            }
        }
    }
}
/// Multiplies two digit slices into a freshly allocated, normalized
/// `BigUint`.
fn mul3(x: &[BigDigit], y: &[BigDigit]) -> BigUint {
    // One extra digit leaves headroom for the final carry.
    let mut prod = BigUint {
        data: vec![0; x.len() + y.len() + 1],
    };
    mac3(&mut prod.data, x, y);
    prod.normalized()
}

/// Multiplies `a` in place by a single digit `b`.
fn scalar_mul(a: &mut BigUint, b: BigDigit) {
    if b == 0 {
        a.set_zero();
    } else if b == 1 {
        // Multiplying by one is a no-op.
    } else if b.is_power_of_two() {
        // A power-of-two factor reduces to a cheap shift.
        *a <<= b.trailing_zeros();
    } else {
        let mut carry = 0;
        for digit in a.data.iter_mut() {
            *digit = mul_with_carry(*digit, b, &mut carry);
        }
        if carry != 0 {
            a.data.push(carry as BigDigit);
        }
    }
}
// Computes |a - b| together with the sign of (a - b), where both slices are
// little-endian digit sequences.
fn sub_sign(mut a: &[BigDigit], mut b: &[BigDigit]) -> (Sign, BigUint) {
    // Normalize: drop trailing (most significant) zero digits so the slice
    // comparison below reflects the numeric ordering.
    if let Some(&0) = a.last() {
        a = &a[..a.iter().rposition(|&x| x != 0).map_or(0, |i| i + 1)];
    }
    if let Some(&0) = b.last() {
        b = &b[..b.iter().rposition(|&x| x != 0).map_or(0, |i| i + 1)];
    }

    // Always subtract the smaller value from the larger one.
    match cmp_slice(a, b) {
        Ordering::Greater => {
            let mut a = a.to_vec();
            sub2(&mut a, b);
            (Plus, biguint_from_vec(a))
        }
        Ordering::Less => {
            let mut b = b.to_vec();
            sub2(&mut b, a);
            (Minus, biguint_from_vec(b))
        }
        Ordering::Equal => (NoSign, BigUint::ZERO),
    }
}
// Generates the owned/borrowed `Mul` impls for `BigUint`, with fast paths
// for zero and single-digit operands before falling back to full `mul3`.
macro_rules! impl_mul {
    ($(impl Mul<$Other:ty> for $Self:ty;)*) => {$(
        impl Mul<$Other> for $Self {
            type Output = BigUint;

            #[inline]
            fn mul(self, other: $Other) -> BigUint {
                match (&*self.data, &*other.data) {
                    // multiply by zero
                    (&[], _) | (_, &[]) => BigUint::ZERO,
                    // multiply by a scalar
                    (_, &[digit]) => self * digit,
                    (&[digit], _) => other * digit,
                    // full multiplication
                    (x, y) => mul3(x, y),
                }
            }
        }
    )*}
}
impl_mul! {
    impl Mul<BigUint> for BigUint;
    impl Mul<BigUint> for &BigUint;
    impl Mul<&BigUint> for BigUint;
    impl Mul<&BigUint> for &BigUint;
}
// Generates the `MulAssign` impls for `BigUint`, mirroring `impl_mul`'s
// zero and single-digit fast paths.
macro_rules! impl_mul_assign {
    ($(impl MulAssign<$Other:ty> for BigUint;)*) => {$(
        impl MulAssign<$Other> for BigUint {
            #[inline]
            fn mul_assign(&mut self, other: $Other) {
                match (&*self.data, &*other.data) {
                    // multiply by zero
                    (&[], _) => {},
                    (_, &[]) => self.set_zero(),
                    // multiply by a scalar
                    (_, &[digit]) => *self *= digit,
                    (&[digit], _) => *self = other * digit,
                    // full multiplication
                    (x, y) => *self = mul3(x, y),
                }
            }
        }
    )*}
}
impl_mul_assign! {
    impl MulAssign<BigUint> for BigUint;
    impl MulAssign<&BigUint> for BigUint;
}
// Forwarding boilerplate for scalar `*`: promote the smaller unsigned
// operand types and route every owned/borrowed combination (multiplication
// commutes) to the u32/u64/u128 implementations below.
promote_unsigned_scalars!(impl Mul for BigUint, mul);
promote_unsigned_scalars_assign!(impl MulAssign for BigUint, mul_assign);
forward_all_scalar_binop_to_val_val_commutative!(impl Mul<u32> for BigUint, mul);
forward_all_scalar_binop_to_val_val_commutative!(impl Mul<u64> for BigUint, mul);
forward_all_scalar_binop_to_val_val_commutative!(impl Mul<u128> for BigUint, mul);
impl Mul<u32> for BigUint {
type Output = BigUint;
#[inline]
fn mul(mut self, other: u32) -> BigUint {
self *= other;
self
}
}
impl MulAssign<u32> for BigUint {
#[inline]
fn mul_assign(&mut self, other: u32) {
scalar_mul(self, other as BigDigit);
}
}
impl Mul<u64> for BigUint {
type Output = BigUint;
#[inline]
fn mul(mut self, other: u64) -> BigUint {
self *= other;
self
}
}
impl MulAssign<u64> for BigUint {
    cfg_digit!(
        // Digit size u32: a u64 factor may span two digits; fall back to
        // `mul3` with its little-endian (lo, hi) split when it does.
        #[inline]
        fn mul_assign(&mut self, other: u64) {
            if let Some(other) = BigDigit::from_u64(other) {
                scalar_mul(self, other);
            } else {
                let (hi, lo) = big_digit::from_doublebigdigit(other);
                *self = mul3(&self.data, &[lo, hi]);
            }
        }

        // Digit size u64: the factor always fits in a single digit.
        #[inline]
        fn mul_assign(&mut self, other: u64) {
            scalar_mul(self, other);
        }
    );
}
impl Mul<u128> for BigUint {
    type Output = BigUint;

    /// Multiplies by a `u128` scalar via the in-place implementation.
    #[inline]
    fn mul(self, other: u128) -> BigUint {
        let mut product = self;
        product *= other;
        product
    }
}
impl MulAssign<u128> for BigUint {
    cfg_digit!(
        // Digit size u32: a u128 factor may span up to four digits; build
        // the shortest little-endian digit slice and fall back to `mul3`.
        #[inline]
        fn mul_assign(&mut self, other: u128) {
            if let Some(other) = BigDigit::from_u128(other) {
                scalar_mul(self, other);
            } else {
                *self = match super::u32_from_u128(other) {
                    (0, 0, c, d) => mul3(&self.data, &[d, c]),
                    (0, b, c, d) => mul3(&self.data, &[d, c, b]),
                    (a, b, c, d) => mul3(&self.data, &[d, c, b, a]),
                };
            }
        }

        // Digit size u64: a u128 factor spans at most two digits.
        #[inline]
        fn mul_assign(&mut self, other: u128) {
            if let Some(other) = BigDigit::from_u128(other) {
                scalar_mul(self, other);
            } else {
                let (hi, lo) = big_digit::from_doublebigdigit(other);
                *self = mul3(&self.data, &[lo, hi]);
            }
        }
    );
}
impl CheckedMul for BigUint {
    /// Unsigned big-integer multiplication cannot overflow, so the
    /// result is always `Some`.
    #[inline]
    fn checked_mul(&self, v: &BigUint) -> Option<BigUint> {
        Some(self * v)
    }
}
// Presumably generates the iterator `Product` impls for `BigUint` —
// macro defined elsewhere in this crate; confirm in macros.rs.
impl_product_iter_type!(BigUint);
#[test]
fn test_sub_sign() {
    use crate::BigInt;
    use num_traits::Num;

    // Wrap `sub_sign` so its (sign, magnitude) pair becomes a `BigInt`.
    fn sub_sign_i(a: &[BigDigit], b: &[BigDigit]) -> BigInt {
        let (sign, mag) = sub_sign(a, b);
        BigInt::from_biguint(sign, mag)
    }

    let big = BigUint::from_str_radix("265252859812191058636308480000000", 10).unwrap();
    let small = BigUint::from_str_radix("26525285981219105863630848000000", 10).unwrap();
    let big_i = BigInt::from(big.clone());
    let small_i = BigInt::from(small.clone());

    // Both orderings must agree with signed subtraction.
    assert_eq!(sub_sign_i(&big.data, &small.data), &big_i - &small_i);
    assert_eq!(sub_sign_i(&small.data, &big.data), &small_i - &big_i);
}

258
vendor/num-bigint/src/biguint/power.rs vendored Normal file
View File

@@ -0,0 +1,258 @@
use super::monty::monty_modpow;
use super::BigUint;
use crate::big_digit::{self, BigDigit};
use num_integer::Integer;
use num_traits::{One, Pow, ToPrimitive, Zero};
impl Pow<&BigUint> for BigUint {
    type Output = BigUint;

    /// Raise `self` to the power `exp`.
    ///
    /// # Panics
    ///
    /// Panics with "memory overflow" when `self >= 2` and `exp >= 2¹²⁸`,
    /// since any such result would be far too large to represent.
    #[inline]
    fn pow(self, exp: &BigUint) -> BigUint {
        // Trivial cases first: 1^n == x^0 == 1.
        if self.is_one() || exp.is_zero() {
            return BigUint::one();
        }
        // 0^n == 0 for n > 0.
        if self.is_zero() {
            return Self::ZERO;
        }
        // Delegate to the primitive-exponent implementations when the
        // exponent fits.
        if let Some(e) = exp.to_u64() {
            return self.pow(e);
        }
        if let Some(e) = exp.to_u128() {
            return self.pow(e);
        }
        // At this point, `self >= 2` and `exp >= 2¹²⁸`. The smallest possible result given
        // `2.pow(2¹²⁸)` would require far more memory than 64-bit targets can address!
        panic!("memory overflow")
    }
}
impl Pow<BigUint> for BigUint {
    type Output = BigUint;

    // Owned exponent simply delegates to the by-reference impl.
    #[inline]
    fn pow(self, exp: BigUint) -> BigUint {
        Pow::pow(self, &exp)
    }
}
impl Pow<&BigUint> for &BigUint {
type Output = BigUint;
#[inline]
fn pow(self, exp: &BigUint) -> BigUint {
if self.is_one() || exp.is_zero() {
BigUint::one()
} else if self.is_zero() {
BigUint::ZERO
} else {
self.clone().pow(exp)
}
}
}
impl Pow<BigUint> for &BigUint {
    type Output = BigUint;

    // Owned exponent simply delegates to the by-reference impl.
    #[inline]
    fn pow(self, exp: BigUint) -> BigUint {
        Pow::pow(self, &exp)
    }
}
// Implements `Pow<$T>` (all owned/borrowed combinations) for `BigUint`
// using exponentiation by squaring.
macro_rules! pow_impl {
    ($T:ty) => {
        impl Pow<$T> for BigUint {
            type Output = BigUint;

            fn pow(self, mut exp: $T) -> BigUint {
                if exp == 0 {
                    return BigUint::one();
                }
                let mut base = self;

                // Square away the exponent's trailing zero bits, so `base`
                // absorbs the largest power of two dividing the exponent.
                while exp & 1 == 0 {
                    base = &base * &base;
                    exp >>= 1;
                }

                if exp == 1 {
                    return base;
                }

                // Standard square-and-multiply over the remaining bits.
                let mut acc = base.clone();
                while exp > 1 {
                    exp >>= 1;
                    base = &base * &base;
                    if exp & 1 == 1 {
                        acc *= &base;
                    }
                }
                acc
            }
        }

        impl Pow<&$T> for BigUint {
            type Output = BigUint;

            #[inline]
            fn pow(self, exp: &$T) -> BigUint {
                Pow::pow(self, *exp)
            }
        }

        impl Pow<$T> for &BigUint {
            type Output = BigUint;

            #[inline]
            fn pow(self, exp: $T) -> BigUint {
                // Avoid the clone when the result is trivially one.
                if exp == 0 {
                    return BigUint::one();
                }
                Pow::pow(self.clone(), exp)
            }
        }

        impl Pow<&$T> for &BigUint {
            type Output = BigUint;

            #[inline]
            fn pow(self, exp: &$T) -> BigUint {
                Pow::pow(self, *exp)
            }
        }
    };
}
// All primitive unsigned exponent types supported by `Pow`.
pow_impl!(u8);
pow_impl!(u16);
pow_impl!(u32);
pow_impl!(u64);
pow_impl!(usize);
pow_impl!(u128);
/// Compute `x^exponent mod modulus`, dispatching to the faster Montgomery
/// path when the modulus is odd.
///
/// # Panics
///
/// Panics if `modulus` is zero.
pub(super) fn modpow(x: &BigUint, exponent: &BigUint, modulus: &BigUint) -> BigUint {
    assert!(
        !modulus.is_zero(),
        "attempt to calculate with zero modulus!"
    );

    match modulus.is_odd() {
        // For an odd modulus, we can use Montgomery multiplication in base 2^32.
        true => monty_modpow(x, exponent, modulus),
        // Otherwise do basically the same as `num::pow`, but with a modulus.
        false => plain_modpow(x, &exponent.data, modulus),
    }
}
// Square-and-multiply modular exponentiation over the exponent's raw
// digit slice, used when Montgomery multiplication is unavailable
// (even modulus).
fn plain_modpow(base: &BigUint, exp_data: &[BigDigit], modulus: &BigUint) -> BigUint {
    assert!(
        !modulus.is_zero(),
        "attempt to calculate with zero modulus!"
    );

    // Index of the first non-zero exponent digit; an all-zero exponent
    // means x^0 == 1.
    let i = match exp_data.iter().position(|&r| r != 0) {
        None => return BigUint::one(),
        Some(i) => i,
    };

    // Square through the `i` all-zero low digits in one pass.
    let mut base = base % modulus;
    for _ in 0..i {
        for _ in 0..big_digit::BITS {
            base = &base * &base % modulus;
        }
    }

    // Skip the trailing zero bits of the first non-zero digit, counting
    // them in `b` so we know how many bits of this digit remain.
    let mut r = exp_data[i];
    let mut b = 0u8;
    while r.is_even() {
        base = &base * &base % modulus;
        r >>= 1;
        b += 1;
    }

    let mut exp_iter = exp_data[i + 1..].iter();
    if exp_iter.len() == 0 && r.is_one() {
        return base;
    }

    // `acc` accumulates the product; the lowest set bit is consumed here.
    let mut acc = base.clone();
    r >>= 1;
    b += 1;

    {
        // One square-and-maybe-multiply step per exponent bit.
        let mut unit = |exp_is_odd| {
            base = &base * &base % modulus;
            if exp_is_odd {
                acc *= &base;
                acc %= modulus;
            }
        };

        if let Some(&last) = exp_iter.next_back() {
            // consume exp_data[i]
            for _ in b..big_digit::BITS {
                unit(r.is_odd());
                r >>= 1;
            }

            // consume all other digits before the last
            for &r in exp_iter {
                let mut r = r;
                for _ in 0..big_digit::BITS {
                    unit(r.is_odd());
                    r >>= 1;
                }
            }
            r = last;
        }

        // Top digit: stop as soon as the remaining bits are all zero.
        debug_assert_ne!(r, 0);
        while !r.is_zero() {
            unit(r.is_odd());
            r >>= 1;
        }
    }
    acc
}
#[test]
fn test_plain_modpow() {
    let two = &BigUint::from(2u32);
    let modulus = BigUint::from(0x1100u32);

    // Each case pairs a digit-vector exponent with the u32 exponent it
    // is congruent to modulo the period of 2 mod 0x1100.
    let cases: &[(Vec<BigDigit>, u32)] = &[
        (vec![0, 0b1], 0b1_00000000_u32),
        (vec![0, 0b10], 0b10_00000000_u32),
        (vec![0, 0b110010], 0b110010_00000000_u32),
        (vec![0b1, 0b1], 0b1_00000001_u32),
        (vec![0b1100, 0, 0b1], 0b1_00000000_00001100_u32),
    ];
    for (exp_digits, exp) in cases {
        assert_eq!(
            two.pow(*exp) % &modulus,
            plain_modpow(two, exp_digits, &modulus)
        );
    }
}
#[test]
fn test_pow_biguint() {
    // 5^3 == 125, exercising `Pow` with a `BigUint` exponent.
    let base = BigUint::from(5u8);
    let exponent = BigUint::from(3u8);
    let expected = BigUint::from(125u8);

    assert_eq!(expected, base.pow(exponent));
}

121
vendor/num-bigint/src/biguint/serde.rs vendored Normal file
View File

@@ -0,0 +1,121 @@
#![cfg(feature = "serde")]
#![cfg_attr(docsrs, doc(cfg(feature = "serde")))]
use super::{biguint_from_vec, BigUint};
use alloc::vec::Vec;
use core::{cmp, fmt, mem};
use serde::de::{SeqAccess, Visitor};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
// `cautious` is based on the function of the same name in `serde`, but specialized to `u32`:
// https://github.com/dtolnay/serde/blob/399ef081ecc36d2f165ff1f6debdcbf6a1dc7efb/serde/src/private/size_hint.rs#L11-L22
// Cap a deserializer's (untrusted) length hint so we never preallocate
// more than 1 MiB worth of `u32` digits.
fn cautious(hint: Option<usize>) -> usize {
    const MAX_PREALLOC_BYTES: usize = 1024 * 1024;
    // Maximum number of u32 elements that fit in the byte budget.
    const MAX_PREALLOC_LEN: usize = MAX_PREALLOC_BYTES / mem::size_of::<u32>();
    hint.unwrap_or(0).min(MAX_PREALLOC_LEN)
}
impl Serialize for BigUint {
    cfg_digit!(
        // 32-bit digits already match the serialized base-2^32 format.
        fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
        where
            S: Serializer,
        {
            // Note: do not change the serialization format, or it may break forward
            // and backward compatibility of serialized data!  If we ever change the
            // internal representation, we should still serialize in base-`u32`.
            let data: &[u32] = &self.data;
            data.serialize(serializer)
        }

        // 64-bit digits are split into little-endian u32 halves; a zero
        // high half of the most significant digit is omitted so the
        // output is byte-identical to the 32-bit representation.
        fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
        where
            S: Serializer,
        {
            use serde::ser::SerializeSeq;

            if let Some((&last, data)) = self.data.split_last() {
                let last_lo = last as u32;
                let last_hi = (last >> 32) as u32;
                let u32_len = data.len() * 2 + 1 + (last_hi != 0) as usize;
                let mut seq = serializer.serialize_seq(Some(u32_len))?;
                for &x in data {
                    seq.serialize_element(&(x as u32))?;
                    seq.serialize_element(&((x >> 32) as u32))?;
                }
                seq.serialize_element(&last_lo)?;
                if last_hi != 0 {
                    seq.serialize_element(&last_hi)?;
                }
                seq.end()
            } else {
                // Zero serializes as an empty sequence.
                let data: &[u32] = &[];
                data.serialize(serializer)
            }
        }
    );
}
impl<'de> Deserialize<'de> for BigUint {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        // The portable wire format is a sequence of base-2^32 digits,
        // regardless of the internal digit width.
        deserializer.deserialize_seq(U32Visitor)
    }
}
/// Serde visitor that rebuilds a `BigUint` from a sequence of base-2^32 digits.
struct U32Visitor;
impl<'de> Visitor<'de> for U32Visitor {
    type Value = BigUint;

    fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
        formatter.write_str("a sequence of unsigned 32-bit numbers")
    }

    cfg_digit!(
        // 32-bit digits: the serialized u32 digits are used directly.
        fn visit_seq<S>(self, mut seq: S) -> Result<Self::Value, S::Error>
        where
            S: SeqAccess<'de>,
        {
            // `cautious` caps preallocation from the untrusted size hint.
            let len = cautious(seq.size_hint());
            let mut data = Vec::with_capacity(len);

            while let Some(value) = seq.next_element::<u32>()? {
                data.push(value);
            }

            Ok(biguint_from_vec(data))
        }

        // 64-bit digits: pairs of serialized u32 values are packed
        // little-endian into each u64 digit; a trailing unpaired u32
        // becomes the (zero-extended) top digit.
        fn visit_seq<S>(self, mut seq: S) -> Result<Self::Value, S::Error>
        where
            S: SeqAccess<'de>,
        {
            use crate::big_digit::BigDigit;
            use num_integer::Integer;

            let u32_len = cautious(seq.size_hint());
            let len = Integer::div_ceil(&u32_len, &2);
            let mut data = Vec::with_capacity(len);

            while let Some(lo) = seq.next_element::<u32>()? {
                let mut value = BigDigit::from(lo);
                if let Some(hi) = seq.next_element::<u32>()? {
                    value |= BigDigit::from(hi) << 32;
                    data.push(value);
                } else {
                    data.push(value);
                    break;
                }
            }

            Ok(biguint_from_vec(data))
        }
    );
}

173
vendor/num-bigint/src/biguint/shift.rs vendored Normal file
View File

@@ -0,0 +1,173 @@
use super::{biguint_from_vec, BigUint};
use crate::big_digit;
use alloc::borrow::Cow;
use alloc::vec::Vec;
use core::mem;
use core::ops::{Shl, ShlAssign, Shr, ShrAssign};
use num_traits::{PrimInt, Zero};
/// Left-shift `n` by `shift` bits, splitting the amount into whole digits
/// plus a residual in-digit bit count.
///
/// # Panics
///
/// Panics when `shift` is negative, or when the digit count overflows
/// `usize` ("capacity overflow").
#[inline]
fn biguint_shl<T: PrimInt>(n: Cow<'_, BigUint>, shift: T) -> BigUint {
    assert!(
        shift >= T::zero(),
        "attempt to shift left with negative"
    );
    // 0 << anything == 0; also avoids the digit/bit split below.
    if n.is_zero() {
        return n.into_owned();
    }
    let bits = T::from(big_digit::BITS).unwrap();
    let whole_digits = (shift / bits).to_usize().expect("capacity overflow");
    let bit_shift = (shift % bits).to_u8().unwrap();
    biguint_shl2(n, whole_digits, bit_shift)
}
// Shift left by `digits` whole digits plus `shift` bits (0 <= shift < BITS).
fn biguint_shl2(n: Cow<'_, BigUint>, digits: usize, shift: u8) -> BigUint {
    // Prepend `digits` zero digits, reusing the allocation when owned.
    let mut data = match digits {
        0 => n.into_owned().data,
        _ => {
            let len = digits.saturating_add(n.data.len() + 1);
            let mut data = Vec::with_capacity(len);
            data.resize(digits, 0);
            data.extend(n.data.iter());
            data
        }
    };

    // Then shift the remaining bits, carrying overflow into the next digit.
    if shift > 0 {
        let mut carry = 0;
        let carry_shift = big_digit::BITS - shift;
        for elem in data[digits..].iter_mut() {
            let new_carry = *elem >> carry_shift;
            *elem = (*elem << shift) | carry;
            carry = new_carry;
        }
        if carry != 0 {
            data.push(carry);
        }
    }

    biguint_from_vec(data)
}
/// Right-shift `n` by `shift` bits, splitting the amount into whole digits
/// plus a residual in-digit bit count.
///
/// # Panics
///
/// Panics when `shift` is negative.
#[inline]
fn biguint_shr<T: PrimInt>(n: Cow<'_, BigUint>, shift: T) -> BigUint {
    assert!(
        shift >= T::zero(),
        "attempt to shift right with negative"
    );
    // 0 >> anything == 0.
    if n.is_zero() {
        return n.into_owned();
    }
    // A digit count too large for `usize` saturates, which simply shifts
    // every digit out and yields zero.
    let bits = T::from(big_digit::BITS).unwrap();
    let whole_digits = (shift / bits).to_usize().unwrap_or(usize::MAX);
    let bit_shift = (shift % bits).to_u8().unwrap();
    biguint_shr2(n, whole_digits, bit_shift)
}
// Shift right by `digits` whole digits plus `shift` bits (0 <= shift < BITS).
fn biguint_shr2(n: Cow<'_, BigUint>, digits: usize, shift: u8) -> BigUint {
    // Shifting out every digit leaves zero; reuse the allocation if owned.
    if digits >= n.data.len() {
        let mut n = n.into_owned();
        n.set_zero();
        return n;
    }
    // Drop the low `digits` digits.
    let mut data = match n {
        Cow::Borrowed(n) => n.data[digits..].to_vec(),
        Cow::Owned(mut n) => {
            n.data.drain(..digits);
            n.data
        }
    };

    // Then shift the remaining bits, borrowing from the digit above
    // (iterating from the most significant digit downwards).
    if shift > 0 {
        let mut borrow = 0;
        let borrow_shift = big_digit::BITS - shift;
        for elem in data.iter_mut().rev() {
            let new_borrow = *elem << borrow_shift;
            *elem = (*elem >> shift) | borrow;
            borrow = new_borrow;
        }
    }

    biguint_from_vec(data)
}
// Generates `Shl`/`Shr`/`ShlAssign`/`ShrAssign` for every requested RHS
// type, for both owned and borrowed `BigUint`.
macro_rules! impl_shift {
    (@ref $Shx:ident :: $shx:ident, $ShxAssign:ident :: $shx_assign:ident, $rhs:ty) => {
        // By-reference RHS forwarding to the by-value impls.
        impl $Shx<&$rhs> for BigUint {
            type Output = BigUint;

            #[inline]
            fn $shx(self, rhs: &$rhs) -> BigUint {
                $Shx::$shx(self, *rhs)
            }
        }
        impl $Shx<&$rhs> for &BigUint {
            type Output = BigUint;

            #[inline]
            fn $shx(self, rhs: &$rhs) -> BigUint {
                $Shx::$shx(self, *rhs)
            }
        }
        impl $ShxAssign<&$rhs> for BigUint {
            #[inline]
            fn $shx_assign(&mut self, rhs: &$rhs) {
                $ShxAssign::$shx_assign(self, *rhs);
            }
        }
    };
    ($($rhs:ty),+) => {$(
        impl Shl<$rhs> for BigUint {
            type Output = BigUint;

            #[inline]
            fn shl(self, rhs: $rhs) -> BigUint {
                biguint_shl(Cow::Owned(self), rhs)
            }
        }
        impl Shl<$rhs> for &BigUint {
            type Output = BigUint;

            #[inline]
            fn shl(self, rhs: $rhs) -> BigUint {
                biguint_shl(Cow::Borrowed(self), rhs)
            }
        }
        impl ShlAssign<$rhs> for BigUint {
            #[inline]
            fn shl_assign(&mut self, rhs: $rhs) {
                // Take ownership of the digits so the by-value impl can
                // reuse the allocation.
                let n = mem::replace(self, Self::ZERO);
                *self = n << rhs;
            }
        }
        impl_shift! { @ref Shl::shl, ShlAssign::shl_assign, $rhs }

        impl Shr<$rhs> for BigUint {
            type Output = BigUint;

            #[inline]
            fn shr(self, rhs: $rhs) -> BigUint {
                biguint_shr(Cow::Owned(self), rhs)
            }
        }
        impl Shr<$rhs> for &BigUint {
            type Output = BigUint;

            #[inline]
            fn shr(self, rhs: $rhs) -> BigUint {
                biguint_shr(Cow::Borrowed(self), rhs)
            }
        }
        impl ShrAssign<$rhs> for BigUint {
            #[inline]
            fn shr_assign(&mut self, rhs: $rhs) {
                let n = mem::replace(self, Self::ZERO);
                *self = n >> rhs;
            }
        }
        impl_shift! { @ref Shr::shr, ShrAssign::shr_assign, $rhs }
    )*};
}
// Shifts accept every primitive integer RHS; negative signed amounts
// panic inside `biguint_shl`/`biguint_shr`.
impl_shift! { u8, u16, u32, u64, u128, usize }
impl_shift! { i8, i16, i32, i64, i128, isize }

View File

@@ -0,0 +1,312 @@
use super::BigUint;
use crate::big_digit::{self, BigDigit};
use crate::UsizePromotion;
use core::cmp::Ordering::{Equal, Greater, Less};
use core::ops::{Sub, SubAssign};
use num_traits::CheckedSub;
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64 as arch;
#[cfg(target_arch = "x86")]
use core::arch::x86 as arch;
// Subtract with borrow:
// Hardware subtract-with-borrow on x86-64 with 64-bit digits.
#[cfg(target_arch = "x86_64")]
cfg_64!(
    #[inline]
    fn sbb(borrow: u8, a: u64, b: u64, out: &mut u64) -> u8 {
        // Safety: There are absolutely no safety concerns with calling `_subborrow_u64`.
        // It's just unsafe for API consistency with other intrinsics.
        unsafe { arch::_subborrow_u64(borrow, a, b, out) }
    }
);

// Hardware subtract-with-borrow on x86/x86-64 with 32-bit digits.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
cfg_32!(
    #[inline]
    fn sbb(borrow: u8, a: u32, b: u32, out: &mut u32) -> u8 {
        // Safety: There are absolutely no safety concerns with calling `_subborrow_u32`.
        // It's just unsafe for API consistency with other intrinsics.
        unsafe { arch::_subborrow_u32(borrow, a, b, out) }
    }
);
// fallback for environments where we don't have a subborrow intrinsic
// (copied from the standard library's `borrowing_sub`)
#[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
#[inline]
fn sbb(borrow: u8, lhs: BigDigit, rhs: BigDigit, out: &mut BigDigit) -> u8 {
    // Two-step subtraction: first the operand, then the incoming borrow.
    let (diff, underflow1) = lhs.overflowing_sub(rhs);
    let (diff, underflow2) = diff.overflowing_sub(BigDigit::from(borrow));
    *out = diff;
    // A borrow propagates out if either step wrapped.
    u8::from(underflow1 || underflow2)
}
// In-place `a -= b`, digit by digit with borrow propagation.
// Panics if `b > a` (numerically).
pub(super) fn sub2(a: &mut [BigDigit], b: &[BigDigit]) {
    let mut borrow = 0;

    let len = Ord::min(a.len(), b.len());
    let (a_lo, a_hi) = a.split_at_mut(len);
    let (b_lo, b_hi) = b.split_at(len);

    for (a, b) in a_lo.iter_mut().zip(b_lo) {
        borrow = sbb(borrow, *a, *b, a);
    }

    // Propagate any remaining borrow into `a`'s high digits.
    if borrow != 0 {
        for a in a_hi {
            borrow = sbb(borrow, *a, 0, a);
            if borrow == 0 {
                break;
            }
        }
    }

    // note: we're _required_ to fail on underflow
    assert!(
        borrow == 0 && b_hi.iter().all(|x| *x == 0),
        "Cannot subtract b from a because b is larger than a."
    );
}
// Only for the Sub impl. `a` and `b` must have same length.
// Computes `b[i] = a[i] - b[i]` in place and returns the final borrow.
#[inline]
fn __sub2rev(a: &[BigDigit], b: &mut [BigDigit]) -> u8 {
    debug_assert!(b.len() == a.len());
    // Thread the borrow through the digit pairs with a fold.
    a.iter()
        .zip(b.iter_mut())
        .fold(0, |borrow, (ai, bi)| sbb(borrow, *ai, *bi, bi))
}
// Like `sub2`, but stores the result in `b`: computes `b = a - b`.
// Panics if `a` has more digits than `b`, or on numeric underflow.
fn sub2rev(a: &[BigDigit], b: &mut [BigDigit]) {
    debug_assert!(b.len() >= a.len());

    let len = Ord::min(a.len(), b.len());
    let (a_lo, a_hi) = a.split_at(len);
    let (b_lo, b_hi) = b.split_at_mut(len);

    let borrow = __sub2rev(a_lo, b_lo);

    // Callers must pad `b` so it is at least as long as `a`.
    assert!(a_hi.is_empty());

    // note: we're _required_ to fail on underflow
    assert!(
        borrow == 0 && b_hi.iter().all(|x| *x == 0),
        "Cannot subtract b from a because b is larger than a."
    );
}
// Generate the remaining owned/borrowed operand permutations from the
// val-ref impls below.
forward_val_val_binop!(impl Sub for BigUint, sub);
forward_ref_ref_binop!(impl Sub for BigUint, sub);
forward_val_assign!(impl SubAssign for BigUint, sub_assign);
impl Sub<&BigUint> for BigUint {
type Output = BigUint;
fn sub(mut self, other: &BigUint) -> BigUint {
self -= other;
self
}
}
impl SubAssign<&BigUint> for BigUint {
    /// In-place subtraction; panics if `other > self`.
    fn sub_assign(&mut self, other: &BigUint) {
        let (lhs, rhs) = (&mut self.data[..], &other.data[..]);
        sub2(lhs, rhs);
        self.normalize();
    }
}
impl Sub<BigUint> for &BigUint {
    type Output = BigUint;

    /// Computes `self - other`, reusing `other`'s storage for the result.
    fn sub(self, mut other: BigUint) -> BigUint {
        let other_len = other.data.len();
        if other_len < self.data.len() {
            // Subtract in place over the digits `other` already has,
            // append `self`'s remaining high digits, then settle any
            // outstanding borrow into them.
            let lo_borrow = __sub2rev(&self.data[..other_len], &mut other.data);
            other.data.extend_from_slice(&self.data[other_len..]);
            if lo_borrow != 0 {
                sub2(&mut other.data[other_len..], &[1])
            }
        } else {
            // `other` has at least as many digits; underflow is caught
            // inside `sub2rev`.
            sub2rev(&self.data[..], &mut other.data[..]);
        }
        other.normalized()
    }
}
// Scalar subtraction: promote narrow unsigned types, then generate the
// by-reference forwarding impls for each scalar width.
promote_unsigned_scalars!(impl Sub for BigUint, sub);
promote_unsigned_scalars_assign!(impl SubAssign for BigUint, sub_assign);
forward_all_scalar_binop_to_val_val!(impl Sub<u32> for BigUint, sub);
forward_all_scalar_binop_to_val_val!(impl Sub<u64> for BigUint, sub);
forward_all_scalar_binop_to_val_val!(impl Sub<u128> for BigUint, sub);
impl Sub<u32> for BigUint {
type Output = BigUint;
#[inline]
fn sub(mut self, other: u32) -> BigUint {
self -= other;
self
}
}
impl SubAssign<u32> for BigUint {
fn sub_assign(&mut self, other: u32) {
sub2(&mut self.data[..], &[other as BigDigit]);
self.normalize();
}
}
impl Sub<BigUint> for u32 {
    type Output = BigUint;

    cfg_digit!(
        // 32-bit digits: the scalar is exactly one digit.
        #[inline]
        fn sub(self, mut other: BigUint) -> BigUint {
            if other.data.len() == 0 {
                other.data.push(self);
            } else {
                sub2rev(&[self], &mut other.data[..]);
            }
            other.normalized()
        }

        // 64-bit digits: widen the scalar to a single digit.
        #[inline]
        fn sub(self, mut other: BigUint) -> BigUint {
            if other.data.is_empty() {
                other.data.push(self as BigDigit);
            } else {
                sub2rev(&[self as BigDigit], &mut other.data[..]);
            }
            other.normalized()
        }
    );
}
impl Sub<u64> for BigUint {
type Output = BigUint;
#[inline]
fn sub(mut self, other: u64) -> BigUint {
self -= other;
self
}
}
impl SubAssign<u64> for BigUint {
    cfg_digit!(
        // 32-bit digits: split the u64 into (hi, lo) digits.
        #[inline]
        fn sub_assign(&mut self, other: u64) {
            let (hi, lo) = big_digit::from_doublebigdigit(other);
            sub2(&mut self.data[..], &[lo, hi]);
            self.normalize();
        }

        // 64-bit digits: the u64 is exactly one digit.
        #[inline]
        fn sub_assign(&mut self, other: u64) {
            sub2(&mut self.data[..], &[other as BigDigit]);
            self.normalize();
        }
    );
}
impl Sub<BigUint> for u64 {
    type Output = BigUint;

    cfg_digit!(
        // 32-bit digits: pad `other` to two digits so `sub2rev` can
        // subtract it from the (hi, lo) pair.
        #[inline]
        fn sub(self, mut other: BigUint) -> BigUint {
            while other.data.len() < 2 {
                other.data.push(0);
            }

            let (hi, lo) = big_digit::from_doublebigdigit(self);
            sub2rev(&[lo, hi], &mut other.data[..]);
            other.normalized()
        }

        // 64-bit digits: the scalar is exactly one digit.
        #[inline]
        fn sub(self, mut other: BigUint) -> BigUint {
            if other.data.is_empty() {
                other.data.push(self);
            } else {
                sub2rev(&[self], &mut other.data[..]);
            }
            other.normalized()
        }
    );
}
impl Sub<u128> for BigUint {
type Output = BigUint;
#[inline]
fn sub(mut self, other: u128) -> BigUint {
self -= other;
self
}
}
impl SubAssign<u128> for BigUint {
    cfg_digit!(
        // 32-bit digits: split the u128 into four u32 digits (little-endian).
        #[inline]
        fn sub_assign(&mut self, other: u128) {
            let (a, b, c, d) = super::u32_from_u128(other);
            sub2(&mut self.data[..], &[d, c, b, a]);
            self.normalize();
        }

        // 64-bit digits: split the u128 into (hi, lo) digits.
        #[inline]
        fn sub_assign(&mut self, other: u128) {
            let (hi, lo) = big_digit::from_doublebigdigit(other);
            sub2(&mut self.data[..], &[lo, hi]);
            self.normalize();
        }
    );
}
impl Sub<BigUint> for u128 {
    type Output = BigUint;

    cfg_digit!(
        // 32-bit digits: pad `other` to four digits before subtracting.
        #[inline]
        fn sub(self, mut other: BigUint) -> BigUint {
            while other.data.len() < 4 {
                other.data.push(0);
            }

            let (a, b, c, d) = super::u32_from_u128(self);
            sub2rev(&[d, c, b, a], &mut other.data[..]);
            other.normalized()
        }

        // 64-bit digits: pad `other` to two digits before subtracting.
        #[inline]
        fn sub(self, mut other: BigUint) -> BigUint {
            while other.data.len() < 2 {
                other.data.push(0);
            }

            let (hi, lo) = big_digit::from_doublebigdigit(self);
            sub2rev(&[lo, hi], &mut other.data[..]);
            other.normalized()
        }
    );
}
impl CheckedSub for BigUint {
    /// Subtraction that yields `None` instead of panicking when `v > self`.
    #[inline]
    fn checked_sub(&self, v: &BigUint) -> Option<BigUint> {
        match self.cmp(v) {
            // Underflow: report failure instead of panicking.
            Less => None,
            // Equal operands short-circuit to zero.
            Equal => Some(Self::ZERO),
            Greater => Some(self - v),
        }
    }
}

268
vendor/num-bigint/src/lib.rs vendored Normal file
View File

@@ -0,0 +1,268 @@
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Big Integer Types for Rust
//!
//! * A [`BigUint`] is unsigned and represented as a vector of digits.
//! * A [`BigInt`] is signed and is a combination of [`BigUint`] and [`Sign`].
//!
//! Common numerical operations are overloaded, so we can treat them
//! the same way we treat other numbers.
//!
//! ## Example
//!
//! ```rust
//! # fn main() {
//! use num_bigint::BigUint;
//! use num_traits::One;
//!
//! // Calculate large fibonacci numbers.
//! fn fib(n: usize) -> BigUint {
//! let mut f0 = BigUint::ZERO;
//! let mut f1 = BigUint::one();
//! for _ in 0..n {
//! let f2 = f0 + &f1;
//! f0 = f1;
//! f1 = f2;
//! }
//! f0
//! }
//!
//! // This is a very large number.
//! println!("fib(1000) = {}", fib(1000));
//! # }
//! ```
//!
//! It's easy to generate large random numbers:
//!
//! ```rust,ignore
//! use num_bigint::{ToBigInt, RandBigInt};
//!
//! let mut rng = rand::thread_rng();
//! let a = rng.gen_bigint(1000);
//!
//! let low = -10000.to_bigint().unwrap();
//! let high = 10000.to_bigint().unwrap();
//! let b = rng.gen_bigint_range(&low, &high);
//!
//! // Probably an even larger number.
//! println!("{}", a * b);
//! ```
//!
//! See the "Features" section for instructions for enabling random number generation.
//!
//! ## Features
//!
//! The `std` crate feature is enabled by default, which enables [`std::error::Error`]
//! implementations and some internal use of floating point approximations. This can be disabled by
//! depending on `num-bigint` with `default-features = false`. Either way, the `alloc` crate is
//! always required for heap allocation of the `BigInt`/`BigUint` digits.
//!
//! ### Random Generation
//!
//! `num-bigint` supports the generation of random big integers when the `rand`
//! feature is enabled. To enable it include rand as
//!
//! ```toml
//! rand = "0.8"
//! num-bigint = { version = "0.4", features = ["rand"] }
//! ```
//!
//! Note that you must use the version of `rand` that `num-bigint` is compatible
//! with: `0.8`.
//!
//! ### Arbitrary Big Integers
//!
//! `num-bigint` supports `arbitrary` and `quickcheck` features to implement
//! [`arbitrary::Arbitrary`] and [`quickcheck::Arbitrary`], respectively, for both `BigInt` and
//! `BigUint`. These are useful for fuzzing and other forms of randomized testing.
//!
//! ### Serialization
//!
//! The `serde` feature adds implementations of [`Serialize`][serde::Serialize] and
//! [`Deserialize`][serde::Deserialize] for both `BigInt` and `BigUint`. Their serialized data is
//! generated portably, regardless of platform differences like the internal digit size.
//!
//!
//! ## Compatibility
//!
//! The `num-bigint` crate is tested for rustc 1.60 and greater.
#![cfg_attr(docsrs, feature(doc_cfg))]
#![doc(html_root_url = "https://docs.rs/num-bigint/0.4")]
#![warn(rust_2018_idioms)]
#![no_std]
#[macro_use]
extern crate alloc;
#[cfg(feature = "std")]
extern crate std;
use core::fmt;
#[macro_use]
mod macros;
mod bigint;
mod bigrand;
mod biguint;
// Widest primitive that a `usize`/`isize` operand is promoted to before
// mixed-type arithmetic with the big integer types (see `promote_*` in
// macros.rs).
#[cfg(target_pointer_width = "32")]
type UsizePromotion = u32;
#[cfg(target_pointer_width = "64")]
type UsizePromotion = u64;

#[cfg(target_pointer_width = "32")]
type IsizePromotion = i32;
#[cfg(target_pointer_width = "64")]
type IsizePromotion = i64;
/// The error type returned when parsing a big integer from a string fails.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ParseBigIntError {
    kind: BigIntErrorKind,
}

// The distinct ways string parsing can fail.
#[derive(Debug, Clone, PartialEq, Eq)]
enum BigIntErrorKind {
    Empty,
    InvalidDigit,
}
impl ParseBigIntError {
fn __description(&self) -> &str {
use crate::BigIntErrorKind::*;
match self.kind {
Empty => "cannot parse integer from empty string",
InvalidDigit => "invalid digit found in string",
}
}
fn empty() -> Self {
ParseBigIntError {
kind: BigIntErrorKind::Empty,
}
}
fn invalid() -> Self {
ParseBigIntError {
kind: BigIntErrorKind::InvalidDigit,
}
}
}
impl fmt::Display for ParseBigIntError {
    /// Delegate to `str`'s `Display` so formatter flags (width, fill)
    /// are honored.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(self.__description(), f)
    }
}
#[cfg(feature = "std")]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
impl std::error::Error for ParseBigIntError {
    // `description` is deprecated in std in favor of `Display`, but is
    // implemented here for compatibility with older consumers.
    fn description(&self) -> &str {
        self.__description()
    }
}
/// The error type returned when a checked conversion regarding big integer fails.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct TryFromBigIntError<T> {
    original: T,
}

impl<T> TryFromBigIntError<T> {
    // Wrap the value that failed to convert.
    fn new(original: T) -> Self {
        TryFromBigIntError { original }
    }

    fn __description(&self) -> &str {
        "out of range conversion regarding big integer attempted"
    }

    /// Extract the original value, if available. The value will be available
    /// if the type before conversion was either [`BigInt`] or [`BigUint`].
    pub fn into_original(self) -> T {
        self.original
    }
}
#[cfg(feature = "std")]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
impl<T> std::error::Error for TryFromBigIntError<T>
where
    T: fmt::Debug,
{
    // Kept for compatibility with older consumers; `Display` carries
    // the same message.
    fn description(&self) -> &str {
        self.__description()
    }
}
impl<T> fmt::Display for TryFromBigIntError<T> {
    /// Delegate to `str`'s `Display` so formatter flags are honored.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(self.__description(), f)
    }
}
pub use crate::biguint::BigUint;
pub use crate::biguint::ToBigUint;
pub use crate::biguint::U32Digits;
pub use crate::biguint::U64Digits;
pub use crate::bigint::BigInt;
pub use crate::bigint::Sign;
pub use crate::bigint::ToBigInt;
#[cfg(feature = "rand")]
#[cfg_attr(docsrs, doc(cfg(feature = "rand")))]
pub use crate::bigrand::{RandBigInt, RandomBits, UniformBigInt, UniformBigUint};
mod big_digit {
    // A [`BigDigit`] is a [`BigUint`]'s composing element.
    // Digit width follows pointer width: u32 on 32-bit, u64 on 64-bit targets.
    cfg_digit!(
        pub(crate) type BigDigit = u32;
        pub(crate) type BigDigit = u64;
    );

    // A [`DoubleBigDigit`] is the internal type used to do the computations.  Its
    // size is the double of the size of [`BigDigit`].
    cfg_digit!(
        pub(crate) type DoubleBigDigit = u64;
        pub(crate) type DoubleBigDigit = u128;
    );

    // Bit width of one digit, and constants derived from it.
    pub(crate) const BITS: u8 = BigDigit::BITS as u8;
    pub(crate) const HALF_BITS: u8 = BITS / 2;
    // Mask covering the low half of a digit.
    pub(crate) const HALF: BigDigit = (1 << HALF_BITS) - 1;

    pub(crate) const MAX: BigDigit = BigDigit::MAX;
    // Mask covering the low digit of a double-width value.
    const LO_MASK: DoubleBigDigit = MAX as DoubleBigDigit;

    #[inline]
    fn get_hi(n: DoubleBigDigit) -> BigDigit {
        (n >> BITS) as BigDigit
    }
    #[inline]
    fn get_lo(n: DoubleBigDigit) -> BigDigit {
        (n & LO_MASK) as BigDigit
    }

    /// Split one [`DoubleBigDigit`] into two [`BigDigit`]s.
    #[inline]
    pub(crate) fn from_doublebigdigit(n: DoubleBigDigit) -> (BigDigit, BigDigit) {
        (get_hi(n), get_lo(n))
    }

    /// Join two [`BigDigit`]s into one [`DoubleBigDigit`].
    #[inline]
    pub(crate) fn to_doublebigdigit(hi: BigDigit, lo: BigDigit) -> DoubleBigDigit {
        DoubleBigDigit::from(lo) | (DoubleBigDigit::from(hi) << BITS)
    }
}

473
vendor/num-bigint/src/macros.rs vendored Normal file
View File

@@ -0,0 +1,473 @@
#![allow(unused_macros)]
// Emit the wrapped item only on 32-bit-digit builds (non-64-bit pointers).
macro_rules! cfg_32 {
    ($($any:tt)+) => {
        #[cfg(not(target_pointer_width = "64"))] $($any)+
    }
}

// Like `cfg_32`, but also active under `cfg(test)`.
macro_rules! cfg_32_or_test {
    ($($any:tt)+) => {
        #[cfg(any(not(target_pointer_width = "64"), test))] $($any)+
    }
}

// Emit the wrapped item only on 64-bit-digit builds (64-bit pointers).
macro_rules! cfg_64 {
    ($($any:tt)+) => {
        #[cfg(target_pointer_width = "64")] $($any)+
    }
}

// Select between a 32-bit-digit item and a 64-bit-digit item.
macro_rules! cfg_digit {
    ($item32:item $item64:item) => {
        cfg_32!($item32);
        cfg_64!($item64);
    };
}

// Expression-statement counterpart of `cfg_digit`.
macro_rules! cfg_digit_expr {
    ($expr32:expr, $expr64:expr) => {
        cfg_32!($expr32);
        cfg_64!($expr64);
    };
}
// `T op T` → forward to `T op &T`.
macro_rules! forward_val_val_binop {
    (impl $imp:ident for $res:ty, $method:ident) => {
        impl $imp<$res> for $res {
            type Output = $res;

            #[inline]
            fn $method(self, other: $res) -> $res {
                // forward to val-ref
                $imp::$method(self, &other)
            }
        }
    };
}

// `T op T` for commutative ops → keep the operand with more capacity
// owned, so its buffer can be reused for the result.
macro_rules! forward_val_val_binop_commutative {
    (impl $imp:ident for $res:ty, $method:ident) => {
        impl $imp<$res> for $res {
            type Output = $res;

            #[inline]
            fn $method(self, other: $res) -> $res {
                // forward to val-ref, with the larger capacity as val
                if self.capacity() >= other.capacity() {
                    $imp::$method(self, &other)
                } else {
                    $imp::$method(other, &self)
                }
            }
        }
    };
}

// `&T op T` → forward to `&T op &T`.
macro_rules! forward_ref_val_binop {
    (impl $imp:ident for $res:ty, $method:ident) => {
        impl $imp<$res> for &$res {
            type Output = $res;

            #[inline]
            fn $method(self, other: $res) -> $res {
                // forward to ref-ref
                $imp::$method(self, &other)
            }
        }
    };
}

// `&T op T` for commutative ops → swap operands and reuse `T op &T`.
macro_rules! forward_ref_val_binop_commutative {
    (impl $imp:ident for $res:ty, $method:ident) => {
        impl $imp<$res> for &$res {
            type Output = $res;

            #[inline]
            fn $method(self, other: $res) -> $res {
                // reverse, forward to val-ref
                $imp::$method(other, self)
            }
        }
    };
}

// `T op &T` → forward to `&T op &T`.
macro_rules! forward_val_ref_binop {
    (impl $imp:ident for $res:ty, $method:ident) => {
        impl $imp<&$res> for $res {
            type Output = $res;

            #[inline]
            fn $method(self, other: &$res) -> $res {
                // forward to ref-ref
                $imp::$method(&self, other)
            }
        }
    };
}

// `&T op &T` → clone the left operand and forward to `T op &T`.
macro_rules! forward_ref_ref_binop {
    (impl $imp:ident for $res:ty, $method:ident) => {
        impl $imp<&$res> for &$res {
            type Output = $res;

            #[inline]
            fn $method(self, other: &$res) -> $res {
                // forward to val-ref
                $imp::$method(self.clone(), other)
            }
        }
    };
}

// `&T op &T` for commutative ops → clone whichever operand is longer.
macro_rules! forward_ref_ref_binop_commutative {
    (impl $imp:ident for $res:ty, $method:ident) => {
        impl $imp<&$res> for &$res {
            type Output = $res;

            #[inline]
            fn $method(self, other: &$res) -> $res {
                // forward to val-ref, choosing the larger to clone
                if self.len() >= other.len() {
                    $imp::$method(self.clone(), other)
                } else {
                    $imp::$method(other.clone(), self)
                }
            }
        }
    };
}
// `T op= T` → forward to `T op= &T`.
macro_rules! forward_val_assign {
    (impl $imp:ident for $res:ty, $method:ident) => {
        impl $imp<$res> for $res {
            #[inline]
            fn $method(&mut self, other: $res) {
                self.$method(&other);
            }
        }
    };
}

// `scalar op= T` → forward to `scalar op= &T`.
macro_rules! forward_val_assign_scalar {
    (impl $imp:ident for $res:ty, $scalar:ty, $method:ident) => {
        impl $imp<$res> for $scalar {
            #[inline]
            fn $method(&mut self, other: $res) {
                self.$method(&other);
            }
        }
    };
}

/// use this if val_val_binop is already implemented and the reversed order is required
macro_rules! forward_scalar_val_val_binop_commutative {
    (impl $imp:ident < $scalar:ty > for $res:ty, $method:ident) => {
        impl $imp<$res> for $scalar {
            type Output = $res;

            #[inline]
            fn $method(self, other: $res) -> $res {
                $imp::$method(other, self)
            }
        }
    };
}
// Forward scalar to ref-val, when reusing storage is not helpful
macro_rules! forward_scalar_val_val_binop_to_ref_val {
    (impl $imp:ident<$scalar:ty> for $res:ty, $method:ident) => {
        impl $imp<$scalar> for $res {
            type Output = $res;

            #[inline]
            fn $method(self, other: $scalar) -> $res {
                $imp::$method(&self, other)
            }
        }

        impl $imp<$res> for $scalar {
            type Output = $res;

            #[inline]
            fn $method(self, other: $res) -> $res {
                $imp::$method(self, &other)
            }
        }
    };
}

// `&T op &scalar` (both orders) → copy the scalar, keep the big value borrowed.
macro_rules! forward_scalar_ref_ref_binop_to_ref_val {
    (impl $imp:ident<$scalar:ty> for $res:ty, $method:ident) => {
        impl $imp<&$scalar> for &$res {
            type Output = $res;

            #[inline]
            fn $method(self, other: &$scalar) -> $res {
                $imp::$method(self, *other)
            }
        }

        impl $imp<&$res> for &$scalar {
            type Output = $res;

            #[inline]
            fn $method(self, other: &$res) -> $res {
                $imp::$method(*self, other)
            }
        }
    };
}

// Referenced scalar with owned big value → copy the scalar, borrow the value.
macro_rules! forward_scalar_val_ref_binop_to_ref_val {
    (impl $imp:ident<$scalar:ty> for $res:ty, $method:ident) => {
        impl $imp<&$scalar> for $res {
            type Output = $res;

            #[inline]
            fn $method(self, other: &$scalar) -> $res {
                $imp::$method(&self, *other)
            }
        }

        impl $imp<$res> for &$scalar {
            type Output = $res;

            #[inline]
            fn $method(self, other: $res) -> $res {
                $imp::$method(*self, &other)
            }
        }
    };
}

// Referenced scalar → copy it and forward to the val-val impl.
macro_rules! forward_scalar_val_ref_binop_to_val_val {
    (impl $imp:ident<$scalar:ty> for $res:ty, $method:ident) => {
        impl $imp<&$scalar> for $res {
            type Output = $res;

            #[inline]
            fn $method(self, other: &$scalar) -> $res {
                $imp::$method(self, *other)
            }
        }

        impl $imp<$res> for &$scalar {
            type Output = $res;

            #[inline]
            fn $method(self, other: $res) -> $res {
                $imp::$method(*self, other)
            }
        }
    };
}

// Referenced big value → clone it and forward to the val-val impl.
macro_rules! forward_scalar_ref_val_binop_to_val_val {
    (impl $imp:ident < $scalar:ty > for $res:ty, $method:ident) => {
        impl $imp<$scalar> for &$res {
            type Output = $res;

            #[inline]
            fn $method(self, other: $scalar) -> $res {
                $imp::$method(self.clone(), other)
            }
        }

        impl $imp<&$res> for $scalar {
            type Output = $res;

            #[inline]
            fn $method(self, other: &$res) -> $res {
                $imp::$method(self, other.clone())
            }
        }
    };
}

// Both operands referenced → copy the scalar, clone the big value.
macro_rules! forward_scalar_ref_ref_binop_to_val_val {
    (impl $imp:ident<$scalar:ty> for $res:ty, $method:ident) => {
        impl $imp<&$scalar> for &$res {
            type Output = $res;

            #[inline]
            fn $method(self, other: &$scalar) -> $res {
                $imp::$method(self.clone(), *other)
            }
        }

        impl $imp<&$res> for &$scalar {
            type Output = $res;

            #[inline]
            fn $method(self, other: &$res) -> $res {
                $imp::$method(*self, other.clone())
            }
        }
    };
}
// Implements `$imp` for each listed `$scalar` type by converting the scalar to
// the promotion type `$promo` and reusing the `$promo` implementation. Also
// pulls in all the ref/val forwarding impls for each scalar.
macro_rules! promote_scalars {
    (impl $imp:ident<$promo:ty> for $res:ty, $method:ident, $( $scalar:ty ),*) => {
        $(
            forward_all_scalar_binop_to_val_val!(impl $imp<$scalar> for $res, $method);

            impl $imp<$scalar> for $res {
                type Output = $res;

                // The `as` cast is a promotion to a wider type; silence clippy's
                // suggestion to use `From` so the macro works for all scalars.
                #[allow(clippy::cast_lossless)]
                #[inline]
                fn $method(self, other: $scalar) -> $res {
                    $imp::$method(self, other as $promo)
                }
            }

            impl $imp<$res> for $scalar {
                type Output = $res;

                #[allow(clippy::cast_lossless)]
                #[inline]
                fn $method(self, other: $res) -> $res {
                    $imp::$method(self as $promo, other)
                }
            }
        )*
    }
}

// Same promotion scheme for the compound-assignment traits (`+=`, `-=`, ...),
// which mutate `self` in place and return nothing.
macro_rules! promote_scalars_assign {
    (impl $imp:ident<$promo:ty> for $res:ty, $method:ident, $( $scalar:ty ),*) => {
        $(
            impl $imp<$scalar> for $res {
                #[allow(clippy::cast_lossless)]
                #[inline]
                fn $method(&mut self, other: $scalar) {
                    self.$method(other as $promo);
                }
            }
        )*
    }
}

// u8/u16 promote to u32; usize promotes to the platform-dependent
// `UsizePromotion` alias defined elsewhere in the crate.
macro_rules! promote_unsigned_scalars {
    (impl $imp:ident for $res:ty, $method:ident) => {
        promote_scalars!(impl $imp<u32> for $res, $method, u8, u16);
        promote_scalars!(impl $imp<UsizePromotion> for $res, $method, usize);
    }
}

macro_rules! promote_unsigned_scalars_assign {
    (impl $imp:ident for $res:ty, $method:ident) => {
        promote_scalars_assign!(impl $imp<u32> for $res, $method, u8, u16);
        promote_scalars_assign!(impl $imp<UsizePromotion> for $res, $method, usize);
    }
}

// i8/i16 promote to i32; isize promotes to the `IsizePromotion` alias.
macro_rules! promote_signed_scalars {
    (impl $imp:ident for $res:ty, $method:ident) => {
        promote_scalars!(impl $imp<i32> for $res, $method, i8, i16);
        promote_scalars!(impl $imp<IsizePromotion> for $res, $method, isize);
    }
}

macro_rules! promote_signed_scalars_assign {
    (impl $imp:ident for $res:ty, $method:ident) => {
        promote_scalars_assign!(impl $imp<i32> for $res, $method, i8, i16);
        promote_scalars_assign!(impl $imp<IsizePromotion> for $res, $method, isize);
    }
}
// Forward everything to ref-ref, when reusing storage is not helpful
macro_rules! forward_all_binop_to_ref_ref {
    (impl $imp:ident for $res:ty, $method:ident) => {
        forward_val_val_binop!(impl $imp for $res, $method);
        forward_val_ref_binop!(impl $imp for $res, $method);
        forward_ref_val_binop!(impl $imp for $res, $method);
    };
}

// Forward everything to val-ref, so LHS storage can be reused
macro_rules! forward_all_binop_to_val_ref {
    (impl $imp:ident for $res:ty, $method:ident) => {
        forward_val_val_binop!(impl $imp for $res, $method);
        forward_ref_val_binop!(impl $imp for $res, $method);
        forward_ref_ref_binop!(impl $imp for $res, $method);
    };
}

// Forward everything to val-ref, commutatively, so either LHS or RHS storage can be reused
macro_rules! forward_all_binop_to_val_ref_commutative {
    (impl $imp:ident for $res:ty, $method:ident) => {
        forward_val_val_binop_commutative!(impl $imp for $res, $method);
        forward_ref_val_binop_commutative!(impl $imp for $res, $method);
        forward_ref_ref_binop_commutative!(impl $imp for $res, $method);
    };
}

// Forward the three derived scalar forms to a hand-written ref-val impl.
macro_rules! forward_all_scalar_binop_to_ref_val {
    (impl $imp:ident<$scalar:ty> for $res:ty, $method:ident) => {
        forward_scalar_val_val_binop_to_ref_val!(impl $imp<$scalar> for $res, $method);
        forward_scalar_val_ref_binop_to_ref_val!(impl $imp<$scalar> for $res, $method);
        forward_scalar_ref_ref_binop_to_ref_val!(impl $imp<$scalar> for $res, $method);
    }
}

// Forward the three derived scalar forms to a hand-written val-val impl.
macro_rules! forward_all_scalar_binop_to_val_val {
    (impl $imp:ident<$scalar:ty> for $res:ty, $method:ident) => {
        forward_scalar_val_ref_binop_to_val_val!(impl $imp<$scalar> for $res, $method);
        forward_scalar_ref_val_binop_to_val_val!(impl $imp<$scalar> for $res, $method);
        forward_scalar_ref_ref_binop_to_val_val!(impl $imp<$scalar> for $res, $method);
    }
}

// As above, but for commutative ops: `scalar op res` is also derived by
// swapping the operands of `res op scalar`.
macro_rules! forward_all_scalar_binop_to_val_val_commutative {
    (impl $imp:ident<$scalar:ty> for $res:ty, $method:ident) => {
        forward_scalar_val_val_binop_commutative!(impl $imp<$scalar> for $res, $method);
        forward_all_scalar_binop_to_val_val!(impl $imp<$scalar> for $res, $method);
    }
}

// Implement `$imp` for every primitive integer type (signed and unsigned).
macro_rules! promote_all_scalars {
    (impl $imp:ident for $res:ty, $method:ident) => {
        promote_unsigned_scalars!(impl $imp for $res, $method);
        promote_signed_scalars!(impl $imp for $res, $method);
    }
}

macro_rules! promote_all_scalars_assign {
    (impl $imp:ident for $res:ty, $method:ident) => {
        promote_unsigned_scalars_assign!(impl $imp for $res, $method);
        promote_signed_scalars_assign!(impl $imp for $res, $method);
    }
}
// Implements `Iterator::sum()` support for `$res`, accepting any item type
// that `$res` can be added with (owned values, references, scalars, ...).
macro_rules! impl_sum_iter_type {
    ($res:ty) => {
        impl<T> Sum<T> for $res
        where
            $res: Add<T, Output = $res>,
        {
            fn sum<I>(iter: I) -> Self
            where
                I: Iterator<Item = T>,
            {
                // Fold from the additive identity; an empty iterator sums to zero.
                iter.fold(Self::ZERO, <$res>::add)
            }
        }
    };
}

// Implements `Iterator::product()` support for `$res`, accepting any item
// type that `$res` can be multiplied with.
macro_rules! impl_product_iter_type {
    ($res:ty) => {
        impl<T> Product<T> for $res
        where
            $res: Mul<T, Output = $res>,
        {
            fn product<I>(iter: I) -> Self
            where
                I: Iterator<Item = T>,
            {
                // Fold from the multiplicative identity; empty product is one.
                iter.fold(One::one(), <$res>::mul)
            }
        }
    };
}

Binary file not shown.

View File

@@ -0,0 +1 @@
{"name":"num-bigint","vers":"0.4.6","deps":[{"name":"arbitrary","req":"^1","features":[],"optional":true,"default_features":false,"target":null,"kind":"normal","registry":"https://github.com/rust-lang/crates.io-index","package":null,"public":null,"artifact":null,"bindep_target":null,"lib":false},{"name":"num-integer","req":"^0.1.46","features":["i128"],"optional":false,"default_features":false,"target":null,"kind":"normal","registry":"https://github.com/rust-lang/crates.io-index","package":null,"public":null,"artifact":null,"bindep_target":null,"lib":false},{"name":"num-traits","req":"^0.2.18","features":["i128"],"optional":false,"default_features":false,"target":null,"kind":"normal","registry":"https://github.com/rust-lang/crates.io-index","package":null,"public":null,"artifact":null,"bindep_target":null,"lib":false},{"name":"quickcheck","req":"^1","features":[],"optional":true,"default_features":false,"target":null,"kind":"normal","registry":"https://github.com/rust-lang/crates.io-index","package":null,"public":null,"artifact":null,"bindep_target":null,"lib":false},{"name":"rand","req":"^0.8","features":[],"optional":true,"default_features":false,"target":null,"kind":"normal","registry":"https://github.com/rust-lang/crates.io-index","package":null,"public":null,"artifact":null,"bindep_target":null,"lib":false},{"name":"serde","req":"^1.0","features":[],"optional":true,"default_features":false,"target":null,"kind":"normal","registry":"https://github.com/rust-lang/crates.io-index","package":null,"public":null,"artifact":null,"bindep_target":null,"lib":false}],"features":{"arbitrary":["dep:arbitrary"],"default":["std"],"quickcheck":["dep:quickcheck"],"rand":["dep:rand"],"serde":["dep:serde"],"std":["num-integer/std","num-traits/std"]},"features2":null,"cksum":"f125d0b0359be772552dd92d1af463cca7c3283451842a808b97f8aaaa3808ad","yanked":null,"links":null,"rust_version":null,"v":2}

1475
vendor/num-bigint/tests/bigint.rs vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,177 @@
use num_bigint::{BigInt, Sign, ToBigInt};
use num_traits::ToPrimitive;
// Compact encoding for BigInt test vectors: `N` is zero, `P`/`M` are positive
// and negative magnitudes given as u32 digits, least-significant first
// (e.g. `P(&[0, 1])` is 2^32 — see the carry patterns in NOT_VALUES below).
enum ValueVec {
    N,
    P(&'static [u32]),
    M(&'static [u32]),
}
use crate::ValueVec::*;
impl ToBigInt for ValueVec {
    fn to_bigint(&self) -> Option<BigInt> {
        // Always succeeds; Option is only required by the trait signature.
        match self {
            &N => Some(BigInt::from_slice(Sign::NoSign, &[])),
            &P(s) => Some(BigInt::from_slice(Sign::Plus, s)),
            &M(s) => Some(BigInt::from_slice(Sign::Minus, s)),
        }
    }
}
// a, !a
// Pairs exercising two's-complement negation across limb boundaries
// (`!0` is u32::MAX; `&[0, 1]` is 2^32).
const NOT_VALUES: &[(ValueVec, ValueVec)] = &[
    (N, M(&[1])),
    (P(&[1]), M(&[2])),
    (P(&[2]), M(&[3])),
    (P(&[!0 - 2]), M(&[!0 - 1])),
    (P(&[!0 - 1]), M(&[!0])),
    (P(&[!0]), M(&[0, 1])),
    (P(&[0, 1]), M(&[1, 1])),
    (P(&[1, 1]), M(&[2, 1])),
];
// a, b, a & b, a | b, a ^ b
const BITWISE_VALUES: &[(ValueVec, ValueVec, ValueVec, ValueVec, ValueVec)] = &[
    (N, N, N, N, N),
    (N, P(&[1]), N, P(&[1]), P(&[1])),
    (N, P(&[!0]), N, P(&[!0]), P(&[!0])),
    (N, P(&[0, 1]), N, P(&[0, 1]), P(&[0, 1])),
    (N, M(&[1]), N, M(&[1]), M(&[1])),
    (N, M(&[!0]), N, M(&[!0]), M(&[!0])),
    (N, M(&[0, 1]), N, M(&[0, 1]), M(&[0, 1])),
    (P(&[1]), P(&[!0]), P(&[1]), P(&[!0]), P(&[!0 - 1])),
    (P(&[!0]), P(&[!0]), P(&[!0]), P(&[!0]), N),
    (P(&[!0]), P(&[1, 1]), P(&[1]), P(&[!0, 1]), P(&[!0 - 1, 1])),
    (P(&[1]), M(&[!0]), P(&[1]), M(&[!0]), M(&[0, 1])),
    (P(&[!0]), M(&[1]), P(&[!0]), M(&[1]), M(&[0, 1])),
    (P(&[!0]), M(&[!0]), P(&[1]), M(&[1]), M(&[2])),
    (P(&[!0]), M(&[1, 1]), P(&[!0]), M(&[1, 1]), M(&[0, 2])),
    (P(&[1, 1]), M(&[!0]), P(&[1, 1]), M(&[!0]), M(&[0, 2])),
    (M(&[1]), M(&[!0]), M(&[!0]), M(&[1]), P(&[!0 - 1])),
    (M(&[!0]), M(&[!0]), M(&[!0]), M(&[!0]), N),
    (M(&[!0]), M(&[1, 1]), M(&[!0, 1]), M(&[1]), P(&[!0 - 1, 1])),
];
// Signed/unsigned 32-bit boundaries widened to i64 for building corner cases.
const I32_MIN: i64 = i32::MIN as i64;
const I32_MAX: i64 = i32::MAX as i64;
const U32_MAX: i64 = u32::MAX as i64;
// some corner cases
// Every i64 value within +/-3 of a 32- or 64-bit boundary, plus small values.
const I64_VALUES: &[i64] = &[
    i64::MIN,
    i64::MIN + 1,
    i64::MIN + 2,
    i64::MIN + 3,
    -U32_MAX - 3,
    -U32_MAX - 2,
    -U32_MAX - 1,
    -U32_MAX,
    -U32_MAX + 1,
    -U32_MAX + 2,
    -U32_MAX + 3,
    I32_MIN - 3,
    I32_MIN - 2,
    I32_MIN - 1,
    I32_MIN,
    I32_MIN + 1,
    I32_MIN + 2,
    I32_MIN + 3,
    -3,
    -2,
    -1,
    0,
    1,
    2,
    3,
    I32_MAX - 3,
    I32_MAX - 2,
    I32_MAX - 1,
    I32_MAX,
    I32_MAX + 1,
    I32_MAX + 2,
    I32_MAX + 3,
    U32_MAX - 3,
    U32_MAX - 2,
    U32_MAX - 1,
    U32_MAX,
    U32_MAX + 1,
    U32_MAX + 2,
    U32_MAX + 3,
    i64::MAX - 3,
    i64::MAX - 2,
    i64::MAX - 1,
    i64::MAX,
];
#[test]
fn test_not() {
    // Each pair lists a value and its bitwise complement; check both directions.
    for (lhs, rhs) in NOT_VALUES {
        let value = lhs.to_bigint().unwrap();
        let negated = rhs.to_bigint().unwrap();
        // sanity check for tests that fit in i64
        if let Some(pv) = value.to_i64() {
            if let Some(pn) = negated.to_i64() {
                assert_eq!(!pv, pn);
            }
        }
        assert_eq!(!value.clone(), negated, "!{:x}", value);
        assert_eq!(!negated.clone(), value, "!{:x}", negated);
    }
}
#[test]
fn test_not_i64() {
    // BigInt complement must agree with primitive `!` for every i64 corner case.
    I64_VALUES.iter().for_each(|&raw| {
        let big = raw.to_bigint().unwrap();
        let expected = (!raw).to_bigint().unwrap();
        assert_eq!(!big.clone(), expected, "!{:x}", big);
    });
}
#[test]
fn test_bitwise() {
    for (a, b, and, or, xor) in BITWISE_VALUES {
        let a = a.to_bigint().unwrap();
        let b = b.to_bigint().unwrap();
        let and = and.to_bigint().unwrap();
        let or = or.to_bigint().unwrap();
        let xor = xor.to_bigint().unwrap();
        // sanity check for tests that fit in i64
        if let Some(pa) = a.to_i64() {
            if let Some(pb) = b.to_i64() {
                if let Some(pand) = and.to_i64() {
                    assert_eq!(pa & pb, pand);
                }
                if let Some(por) = or.to_i64() {
                    assert_eq!(pa | pb, por);
                }
                if let Some(pxor) = xor.to_i64() {
                    assert_eq!(pa ^ pb, pxor);
                }
            }
        }
        // Every operator must be commutative, so check both operand orders.
        assert_eq!(a.clone() & &b, and, "{:x} & {:x}", a, b);
        assert_eq!(b.clone() & &a, and, "{:x} & {:x}", b, a);
        assert_eq!(a.clone() | &b, or, "{:x} | {:x}", a, b);
        assert_eq!(b.clone() | &a, or, "{:x} | {:x}", b, a);
        assert_eq!(a.clone() ^ &b, xor, "{:x} ^ {:x}", a, b);
        assert_eq!(b.clone() ^ &a, xor, "{:x} ^ {:x}", b, a);
    }
}
#[test]
fn test_bitwise_i64() {
    // Pair up every i64 corner case and compare against the primitive operators.
    for &pa in I64_VALUES {
        let a = pa.to_bigint().unwrap();
        for &pb in I64_VALUES {
            let b = pb.to_bigint().unwrap();
            let and = (pa & pb).to_bigint().unwrap();
            let or = (pa | pb).to_bigint().unwrap();
            let xor = (pa ^ pb).to_bigint().unwrap();
            assert_eq!(a.clone() & &b, and, "{:x} & {:x}", a, b);
            assert_eq!(a.clone() | &b, or, "{:x} | {:x}", a, b);
            assert_eq!(a.clone() ^ &b, xor, "{:x} ^ {:x}", a, b);
        }
    }
}

157
vendor/num-bigint/tests/bigint_scalar.rs vendored Normal file
View File

@@ -0,0 +1,157 @@
use num_bigint::BigInt;
use num_bigint::Sign::Plus;
use num_traits::{One, Signed, ToPrimitive, Zero};
use std::ops::Neg;
use std::panic::catch_unwind;
mod consts;
use crate::consts::*;
#[macro_use]
mod macros;
#[test]
fn test_scalar_add() {
    // Exercise `+` and `+=` for every signed/unsigned scalar width x fits into.
    fn check(x: &BigInt, y: &BigInt, z: &BigInt) {
        let x = x.clone();
        let y = y.clone();
        let z = z.clone();
        assert_signed_scalar_op!(x + y == z);
        assert_signed_scalar_assign_op!(x += y == z);
    }
    for &(a_vec, b_vec, c_vec) in SUM_TRIPLES {
        let a = BigInt::from_slice(Plus, a_vec);
        let b = BigInt::from_slice(Plus, b_vec);
        let c = BigInt::from_slice(Plus, c_vec);
        let na = -&a;
        let nb = -&b;
        let nc = -&c;
        // a + b == c, plus every sign/operand permutation implied by it.
        check(&a, &b, &c);
        check(&b, &a, &c);
        check(&c, &na, &b);
        check(&c, &nb, &a);
        check(&a, &nc, &nb);
        check(&b, &nc, &na);
        check(&na, &nb, &nc);
        check(&a, &na, &Zero::zero());
    }
}
#[test]
fn test_scalar_sub() {
    // Exercise `-` and `-=` for every signed/unsigned scalar width x fits into.
    fn check(x: &BigInt, y: &BigInt, z: &BigInt) {
        let x = x.clone();
        let y = y.clone();
        let z = z.clone();
        assert_signed_scalar_op!(x - y == z);
        assert_signed_scalar_assign_op!(x -= y == z);
    }
    for &(a_vec, b_vec, c_vec) in SUM_TRIPLES {
        let a = BigInt::from_slice(Plus, a_vec);
        let b = BigInt::from_slice(Plus, b_vec);
        let c = BigInt::from_slice(Plus, c_vec);
        let na = -&a;
        let nb = -&b;
        let nc = -&c;
        // From a + b == c derive every subtraction identity, with signs flipped.
        check(&c, &a, &b);
        check(&c, &b, &a);
        check(&nb, &a, &nc);
        check(&na, &b, &nc);
        check(&b, &na, &c);
        check(&a, &nb, &c);
        check(&nc, &na, &nb);
        check(&a, &a, &Zero::zero());
    }
}
#[test]
fn test_scalar_mul() {
    // Exercise `*` and `*=` for every signed/unsigned scalar width x fits into.
    fn check(x: &BigInt, y: &BigInt, z: &BigInt) {
        let x = x.clone();
        let y = y.clone();
        let z = z.clone();
        assert_signed_scalar_op!(x * y == z);
        assert_signed_scalar_assign_op!(x *= y == z);
    }
    for &(a_vec, b_vec, c_vec) in MUL_TRIPLES {
        let a = BigInt::from_slice(Plus, a_vec);
        let b = BigInt::from_slice(Plus, b_vec);
        let c = BigInt::from_slice(Plus, c_vec);
        let na = -&a;
        let nb = -&b;
        let nc = -&c;
        // a * b == c, commuted and with each sign combination.
        check(&a, &b, &c);
        check(&b, &a, &c);
        check(&na, &nb, &c);
        check(&na, &b, &nc);
        check(&nb, &a, &nc);
    }
}
#[test]
fn test_scalar_div_rem() {
    // Verify `/`, `%` and their assign forms against a known quotient and
    // remainder, for positive and negated dividends and divisors.
    fn check_sub(a: &BigInt, b: u32, ans_q: &BigInt, ans_r: &BigInt) {
        let (q, r) = (a / b, a % b);
        // A nonzero remainder takes the sign of the dividend (truncated division).
        if !r.is_zero() {
            assert_eq!(r.sign(), a.sign());
        }
        assert!(r.abs() <= BigInt::from(b));
        // Division identity: a == b * q + r.
        assert!(*a == b * &q + &r);
        assert!(q == *ans_q);
        assert!(r == *ans_r);
        let b = BigInt::from(b);
        let (a, ans_q, ans_r) = (a.clone(), ans_q.clone(), ans_r.clone());
        assert_signed_scalar_op!(a / b == ans_q);
        assert_signed_scalar_op!(a % b == ans_r);
        assert_signed_scalar_assign_op!(a /= b == ans_q);
        assert_signed_scalar_assign_op!(a %= b == ans_r);
        // Negating the divisor flips the quotient's sign but not the remainder's.
        let nb = -b;
        assert_signed_scalar_op!(a / nb == -ans_q.clone());
        assert_signed_scalar_op!(a % nb == ans_r);
        assert_signed_scalar_assign_op!(a /= nb == -ans_q.clone());
        assert_signed_scalar_assign_op!(a %= nb == ans_r);
    }
    // Negating the dividend flips the sign of both quotient and remainder.
    fn check(a: &BigInt, b: u32, q: &BigInt, r: &BigInt) {
        check_sub(a, b, q, r);
        check_sub(&a.neg(), b, &q.neg(), &r.neg());
    }
    // Exact divisions derived from multiplication triples (remainder zero);
    // only single-limb nonzero factors can serve as the u32 divisor.
    for elm in MUL_TRIPLES.iter() {
        let (a_vec, b_vec, c_vec) = *elm;
        let a = BigInt::from_slice(Plus, a_vec);
        let b = BigInt::from_slice(Plus, b_vec);
        let c = BigInt::from_slice(Plus, c_vec);
        if a_vec.len() == 1 && a_vec[0] != 0 {
            let a = a_vec[0];
            check(&c, a, &b, &Zero::zero());
        }
        if b_vec.len() == 1 && b_vec[0] != 0 {
            let b = b_vec[0];
            check(&c, b, &a, &Zero::zero());
        }
    }
    // Inexact divisions with known nonzero remainders.
    for elm in DIV_REM_QUADRUPLES.iter() {
        let (a_vec, b_vec, c_vec, d_vec) = *elm;
        let a = BigInt::from_slice(Plus, a_vec);
        let c = BigInt::from_slice(Plus, c_vec);
        let d = BigInt::from_slice(Plus, d_vec);
        if b_vec.len() == 1 && b_vec[0] != 0 {
            let b = b_vec[0];
            check(&a, b, &c, &d);
        }
    }
}
#[test]
fn test_scalar_div_rem_zero() {
    // Division or remainder by a zero scalar must panic, for any dividend.
    let cases: [fn() -> BigInt; 4] = [
        || BigInt::zero() / 0u32,
        || BigInt::zero() % 0u32,
        || BigInt::one() / 0u32,
        || BigInt::one() % 0u32,
    ];
    for &case in cases.iter() {
        catch_unwind(case).unwrap_err();
    }
}

1921
vendor/num-bigint/tests/biguint.rs vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,123 @@
use num_bigint::BigUint;
use num_traits::{One, ToPrimitive, Zero};
use std::panic::catch_unwind;
mod consts;
use crate::consts::*;
#[macro_use]
mod macros;
#[test]
fn test_scalar_add() {
    // Exercise `+` and `+=` for every unsigned scalar width x fits into.
    fn check(x: &BigUint, y: &BigUint, z: &BigUint) {
        let x = x.clone();
        let y = y.clone();
        let z = z.clone();
        assert_unsigned_scalar_op!(x + y == z);
        assert_unsigned_scalar_assign_op!(x += y == z);
    }
    for &(a_vec, b_vec, c_vec) in SUM_TRIPLES {
        let a = BigUint::from_slice(a_vec);
        let b = BigUint::from_slice(b_vec);
        let c = BigUint::from_slice(c_vec);
        // Addition is commutative: check both operand orders.
        check(&a, &b, &c);
        check(&b, &a, &c);
    }
}
#[test]
fn test_scalar_sub() {
    // Exercise `-` and `-=` for every unsigned scalar width x fits into.
    fn check(x: &BigUint, y: &BigUint, z: &BigUint) {
        let x = x.clone();
        let y = y.clone();
        let z = z.clone();
        assert_unsigned_scalar_op!(x - y == z);
        assert_unsigned_scalar_assign_op!(x -= y == z);
    }
    for &(a_vec, b_vec, c_vec) in SUM_TRIPLES {
        let a = BigUint::from_slice(a_vec);
        let b = BigUint::from_slice(b_vec);
        let c = BigUint::from_slice(c_vec);
        // From a + b == c: both c - a == b and c - b == a stay non-negative.
        check(&c, &a, &b);
        check(&c, &b, &a);
    }
}
#[test]
fn test_scalar_mul() {
    // Exercise `*` and `*=` for every unsigned scalar width x fits into.
    fn check(x: &BigUint, y: &BigUint, z: &BigUint) {
        let x = x.clone();
        let y = y.clone();
        let z = z.clone();
        assert_unsigned_scalar_op!(x * y == z);
        assert_unsigned_scalar_assign_op!(x *= y == z);
    }
    for &(a_vec, b_vec, c_vec) in MUL_TRIPLES {
        let a = BigUint::from_slice(a_vec);
        let b = BigUint::from_slice(b_vec);
        let c = BigUint::from_slice(c_vec);
        // Multiplication is commutative: check both operand orders.
        check(&a, &b, &c);
        check(&b, &a, &c);
    }
}
#[test]
fn test_scalar_rem_noncommutative() {
    // `%` mixes primitives and BigUint on either side; operand order matters
    // for which impl is chosen, but 5 % 7 == 5 both ways here.
    let expected = BigUint::from(5u8);
    assert_eq!(5u8 % BigUint::from(7u8), expected);
    assert_eq!(BigUint::from(5u8) % 7u8, expected);
}
#[test]
fn test_scalar_div_rem() {
    // Verify `/`, `%` and their assign forms against known quotient/remainder.
    fn check(x: &BigUint, y: &BigUint, z: &BigUint, r: &BigUint) {
        let (x, y, z, r) = (x.clone(), y.clone(), z.clone(), r.clone());
        assert_unsigned_scalar_op!(x / y == z);
        assert_unsigned_scalar_op!(x % y == r);
        assert_unsigned_scalar_assign_op!(x /= y == z);
        assert_unsigned_scalar_assign_op!(x %= y == r);
    }
    // Exact divisions (remainder zero) derived from multiplication triples.
    for elm in MUL_TRIPLES.iter() {
        let (a_vec, b_vec, c_vec) = *elm;
        let a = BigUint::from_slice(a_vec);
        let b = BigUint::from_slice(b_vec);
        let c = BigUint::from_slice(c_vec);
        if !a.is_zero() {
            check(&c, &a, &b, &Zero::zero());
        }
        if !b.is_zero() {
            check(&c, &b, &a, &Zero::zero());
        }
    }
    // Inexact divisions with known nonzero remainders.
    for elm in DIV_REM_QUADRUPLES.iter() {
        let (a_vec, b_vec, c_vec, d_vec) = *elm;
        let a = BigUint::from_slice(a_vec);
        let b = BigUint::from_slice(b_vec);
        let c = BigUint::from_slice(c_vec);
        let d = BigUint::from_slice(d_vec);
        if !b.is_zero() {
            check(&a, &b, &c, &d);
            assert_unsigned_scalar_op!(a / b == c);
            assert_unsigned_scalar_op!(a % b == d);
            assert_unsigned_scalar_assign_op!(a /= b == c);
            assert_unsigned_scalar_assign_op!(a %= b == d);
        }
    }
}
#[test]
fn test_scalar_div_rem_zero() {
    // Division or remainder by a zero scalar must panic, for any dividend.
    let cases: [fn() -> BigUint; 4] = [
        || BigUint::zero() / 0u32,
        || BigUint::zero() % 0u32,
        || BigUint::one() / 0u32,
        || BigUint::one() % 0u32,
    ];
    for &case in cases.iter() {
        catch_unwind(case).unwrap_err();
    }
}

51
vendor/num-bigint/tests/consts/mod.rs vendored Normal file
View File

@@ -0,0 +1,51 @@
#![allow(unused)]
// Shared test vectors. Magnitudes are u32 digits, least-significant first
// (e.g. `&[0, 1]` is 2^32). N1/N2 are the bit patterns of -1/-2 as u32,
// i.e. u32::MAX and u32::MAX - 1.
pub const N1: u32 = -1i32 as u32;
pub const N2: u32 = -2i32 as u32;
// Triples (a, b, c) satisfying a + b == c, chosen to exercise limb carries.
pub const SUM_TRIPLES: &[(&[u32], &[u32], &[u32])] = &[
    (&[], &[], &[]),
    (&[], &[1], &[1]),
    (&[1], &[1], &[2]),
    (&[1], &[1, 1], &[2, 1]),
    (&[1], &[N1], &[0, 1]),
    (&[1], &[N1, N1], &[0, 0, 1]),
    (&[N1, N1], &[N1, N1], &[N2, N1, 1]),
    (&[1, 1, 1], &[N1, N1], &[0, 1, 2]),
    (&[2, 2, 1], &[N1, N2], &[1, 1, 2]),
    (&[1, 2, 2, 1], &[N1, N2], &[0, 1, 3, 1]),
];
pub const M: u32 = u32::MAX;
// Triples (a, b, c) satisfying a * b == c, including zero and max-limb cases.
pub const MUL_TRIPLES: &[(&[u32], &[u32], &[u32])] = &[
    (&[], &[], &[]),
    (&[], &[1], &[]),
    (&[2], &[], &[]),
    (&[1], &[1], &[1]),
    (&[2], &[3], &[6]),
    (&[1], &[1, 1, 1], &[1, 1, 1]),
    (&[1, 2, 3], &[3], &[3, 6, 9]),
    (&[1, 1, 1], &[N1], &[N1, N1, N1]),
    (&[1, 2, 3], &[N1], &[N1, N2, N2, 2]),
    (&[1, 2, 3, 4], &[N1], &[N1, N2, N2, N2, 3]),
    (&[N1], &[N1], &[1, N2]),
    (&[N1, N1], &[N1], &[1, N1, N2]),
    (&[N1, N1, N1], &[N1], &[1, N1, N1, N2]),
    (&[N1, N1, N1, N1], &[N1], &[1, N1, N1, N1, N2]),
    (&[M / 2 + 1], &[2], &[0, 1]),
    (&[0, M / 2 + 1], &[2], &[0, 0, 1]),
    (&[1, 2], &[1, 2, 3], &[1, 4, 7, 6]),
    (&[N1, N1], &[N1, N1, N1], &[1, 0, N1, N2, N1]),
    (&[N1, N1, N1], &[N1, N1, N1, N1], &[1, 0, 0, N1, N2, N1, N1]),
    (&[0, 0, 1], &[1, 2, 3], &[0, 0, 1, 2, 3]),
    (&[0, 0, 1], &[0, 0, 0, 1], &[0, 0, 0, 0, 0, 1]),
];
// Quadruples (a, b, q, r) satisfying a == b * q + r with 0 <= r < b.
pub const DIV_REM_QUADRUPLES: &[(&[u32], &[u32], &[u32], &[u32])] = &[
    (&[1], &[2], &[], &[1]),
    (&[3], &[2], &[1], &[1]),
    (&[1, 1], &[2], &[M / 2 + 1], &[1]),
    (&[1, 1, 1], &[2], &[M / 2 + 1, M / 2 + 1], &[1]),
    (&[0, 1], &[N1], &[1], &[1]),
    (&[N1, N1], &[N2], &[2, 1], &[3]),
];

185
vendor/num-bigint/tests/fuzzed.rs vendored Normal file
View File

@@ -0,0 +1,185 @@
//! Check buggy inputs that were found by fuzzing
use num_bigint::BigUint;
use num_traits::Num;
#[test]
fn fuzzed_mul_1() {
    // Regression test: a fuzzer-found multiplication whose result was once
    // computed incorrectly. The expected product was verified externally.
    // NOTE: `\` at end of line in a Rust string literal strips the newline
    // and the next line's leading whitespace, so these are single hex strings.
    let hex1 = "\
    cd6839ee857cf791a40494c2e522846eefbca9eca9912fdc1feed4561dbde75c75f1ddca2325ebb1\
    b9cd6eae07308578e58e57f4ddd7dc239b4fd347b883e37d87232a8e5d5a8690c8dba69c97fe8ac4\
    58add18be7e460e03c9d1ae8223db53d20681a4027ffc17d1e43b764791c4db5ff7add849da7e378\
    ac8d9be0e8b517c490da3c0f944b6a52a0c5dc5217c71da8eec35d2c3110d8b041d2b52f3e2a8904\
    abcaaca517a8f2ef6cd26ceadd39a1cf9f770bc08f55f5a230cd81961348bb18534245430699de77\
    d93b805153cffd05dfd0f2cfc2332888cec9c5abf3ece9b4d7886ad94c784bf74fce12853b2a9a75\
    b62a845151a703446cc20300eafe7332330e992ae88817cd6ccef8877b66a7252300a4664d7074da\
    181cd9fd502ea1cd71c0b02db3c009fe970a7d226382cdba5b5576c5c0341694681c7adc4ca2d059\
    d9a6b300957a2235a4eb6689b71d34dcc4037b520eabd2c8b66604bb662fe2bcf533ba8d242dbc91\
    f04c1795b9f0fee800d197d8c6e998248b15855a9602b76cb3f94b148d8f71f7d6225b79d63a8e20\
    8ec8f0fa56a1c381b6c09bad9886056aec17fc92b9bb0f8625fd3444e40cccc2ede768ddb23c66ad\
    59a680a26a26d519d02e4d46ce93cce9e9dd86702bdd376abae0959a0e8e418aa507a63fafb8f422\
    83b03dc26f371c5e261a8f90f3ac9e2a6bcc7f0a39c3f73043b5aa5a950d4e945e9f68b2c2e593e3\
    b995be174714c1967b71f579043f89bfce37437af9388828a3ba0465c88954110cae6d38b638e094\
    13c15c9faddd6fb63623fd50e06d00c4d5954e787158b3e4eea7e9fae8b189fa8a204b23ac2f7bbc\
    b601189c0df2075977c2424336024ba3594172bea87f0f92beb20276ce8510c8ef2a4cd5ede87e7e\
    38b3fa49d66fbcd322be686a349c24919f4000000000000000000000000000000000000000000000\
    000000000000000000000000000000000";
    let hex2 = "\
    40000000000000000000000000000000000000000000000000000000000000000000000000000000\
    00000000000000000000000000000000000000000000000000000000000000000000000000000000\
    00000000000000000000000000000000000000000000000000000000000000000000000000000000\
    00000000000000000000000000000000000000000000000000000000000000000000000000000000\
    00000000000000000000000000000000000000000000000000000000000000000000000000000000\
    00000000000000000000000000000000000000000000000000000000000000000000000000000000\
    00000000000000000000000000000000000000000000000000000000000000000000000000000000\
    00000000000000000000000000000000000000000000000000000000000000000000000000000000\
    00000000000000000000000000000000000000000000000000000000000000000000000000000000\
    00000000000000000000000000000000000000000000000000000000000000000000000000000000\
    00000000000000000000000000000000000000000000000000000000000000000000000000000000\
    00000000000000000000000000000000000000000000000000000000000000000000000000000000\
    00000000000000000000000000000000000000000000000000000000000000000000000000000000\
    00000000000000000000000000000000000000000000000000000000000000000000000000000000\
    00000000000000000000000000000000000000000000000000000000000000000000000000000000\
    00000000000000000000000000000000000000000000000000000000000000000000000007";
    // Result produced independently in Python
    let hex_result = "\
    335a0e7ba15f3de469012530b948a11bbbef2a7b2a644bf707fbb515876f79d71d7c777288c97aec\
    6e735bab81cc215e396395fd3775f708e6d3f4d1ee20f8df61c8caa39756a1a43236e9a725ffa2b1\
    162b7462f9f918380f2746ba088f6d4f481a069009fff05f4790edd91e47136d7fdeb7612769f8de\
    2b2366f83a2d45f124368f03e512da94a831771485f1c76a3bb0d74b0c44362c1074ad4bcf8aa241\
    2af2ab2945ea3cbbdb349b3ab74e6873e7ddc2f023d57d688c33606584d22ec614d09150c1a6779d\
    f64ee01454f3ff4177f43cb3f08cca2233b2716afcfb3a6d35e21ab6531e12fdd3f384a14ecaa69d\
    6d8aa1145469c0d11b3080c03abf9ccc8cc3a64aba2205f35b33be21ded9a9c948c02919935c1d36\
    8607367f540ba8735c702c0b6cf0027fa5c29f4898e0b36e96d55db1700d05a51a071eb71328b416\
    7669acc0255e888d693ad9a26dc74d373100ded483aaf4b22d99812ed98bf8af3d4ceea3490b6f24\
    7c1305e56e7c3fba003465f631ba660922c56156a580addb2cfe52c52363dc7df58896de758ea388\
    23b23c3e95a870e06db026eb6621815abb05ff24ae6ec3e1897f4d1139033330bb79da376c8f19ab\
    5669a0289a89b546740b9351b3a4f33a7a77619c0af74ddaaeb8256683a39062a941e98febee3d08\
    a0ec0f709bcdc7178986a3e43ceb278a9af31fc28e70fdcc10ed6a96a54353a517a7da2cb0b964f8\
    ee656f85d1c530659edc7d5e410fe26ff38dd0debe4e220a28ee811972225504432b9b4e2d8e3825\
    04f05727eb775bed8d88ff54381b40313565539e1c562cf93ba9fa7eba2c627ea28812c8eb0bdeef\
    2d804627037c81d65df09090cd8092e8d6505cafaa1fc3e4afac809db3a144323bca93358117f935\
    13d3695771180f461cf38bb995b531c9e072f84f04df87ce5ad0315387399d1086f60971dc149e06\
    c23253a64e46e467b210e704f93f2ec6f60b9b386eb1f629e48d79adf57e018e4827f5cb5e6cc0ba\
    d3573ea621a84bbc58efaff4abe2d8b7c117fe4a6bd3da03bf4fc61ff9fc5c0ea04f97384cb7df43\
    265cf3a65ff5f7a46d0e0fe8426569063ea671cf9e87578c355775ecd1ccc2f44ab329bf20b28ab8\
    83a59ea48bf9c0fa6c0c936cad5c415243eb59b76f559e8b1a86fd1daa46cfe4d52e351546f0a082\
    394aafeb291eb6a3ae4f661bbda78467b3ab7a63f1e4baebf1174a13c32ea281a49e2a3937fb299e\
    393b9116def94e15066cf5265f6566302c5bb8a69df9a8cbb45fce9203f5047ecc1e1331f6a8c9f5\
    ed31466c9e1c44d13fea4045f621496bf0b893a0187f563f68416c9e0ed8c75c061873b274f38ee5\
    041656ef77826fcdc401cc72095c185f3e66b2c37cfcca211fcb4f332ab46a19dbfd4027fd9214a5\
    181596f85805bb26ed706328ffcd96a57a1a1303f8ebd10d8fdeec1dc6daf08054db99e2e3e77e96\
    d85e6c588bff4441bf2baa25ec74a7e803141d6cab09ec6de23c5999548153de0fdfa6cebd738d84\
    70e70fd3b4b1441cefa60a9a65650ead11330c83eb1c24173665e3caca83358bbdce0eacf199d1b0\
    510a81c6930ab9ecf6a9b85328f2977947945bc251d9f7a87a135d260e965bdce354470b3a131832\
    a2f1914b1d601db64f1dbcc43ea382d85cd08bb91c7a161ec87bc14c7758c4fc8cfb8e240c8a4988\
    5dc10e0dfb7afbed3622fb0561d715254b196ceb42869765dc5cdac5d9c6e20df9b54c6228fa07ac\
    44619e3372464fcfd67a10117770ca23369b796d0336de113fa5a3757e8a2819d9815b75738cebd8\
    04dd0e29c5f334dae77044fffb5ac000000000000000000000000000000000000000000000000000\
    000000000000000000000000000";
    let bn1 = &BigUint::from_str_radix(hex1, 16).unwrap();
    let bn2 = &BigUint::from_str_radix(hex2, 16).unwrap();
    let result = BigUint::from_str_radix(hex_result, 16).unwrap();
    // Multiplication must be commutative as well as correct.
    assert_eq!(bn1 * bn2, result);
    assert_eq!(bn2 * bn1, result);
}
#[test]
fn fuzzed_mul_2() {
    // Regression test: a fuzzer-found triple product checked in every
    // association order; the expected product was verified externally.
    let hex_a = "\
    812cff04ff812cff04ff8180ff80ffff11ff80ff2cff04ff812cff04ff812cff04ff81232cff047d\
    ff04ff812cff04ff812cff04ff812cff047f812cff04ff8180ff2cff04ff04ff8180ff2cff04ff04\
    ff812cbf04ff8180ff2cff04ff812cff0401010000000000000000ffff1a80ffc006c70084ffff80\
    ffc0064006000084ffff72ffc020ffffffffffff06d709000000dbffffffc799999999b999999999\
    99999999000084ffff72ffc02006e1ffffffc70900ffffff00f312ff80ebffffff6f505f6c2e6712\
    108970ffff5f6c6f6727020000000000007400000000000000000000a50000000000000000000000\
    000000000000000000000000ff812cff04ff812cff2c04ff812cff8180ff2cff04ff04ff818b8b8b\
    8b8b8b8b8b8b8b8b8b8b8b8b8b06c70084ffff80ffc006c700847fff80ffc006c700ffff12c70084\
    ffff80ffc0060000000000000056ff00c789bfff80ffc006c70084ffff80ffc006c700ffff840100\
    00000000001289ffc08b8b8b8b8b8b8b2c";
    let hex_b = "\
    7ed300fb007ed300fb007e7f00db00fb007ed3007ed300fb007edcd300fb8200fb007ed300fb007e\
    d300fb007ed300fb007ed300fbfeffffffffffffa8fb007e7f00d300fb00fb007ed340fb007e7f00\
    00fb007ed300fb007ed300fb007e7f00d300fb00fb007e7f00d300fb007efb007e7f00d300fb007e\
    d300fb007e7f0097d300fb00bf007ed300fb007ed300fb00fb00fb00fbffffffffffffffffffff00\
    00000000000000000000000000000000000000000000000000000000000000000000000000000000\
    000000000000df9b3900ff908fa08d9e968c9a0000e7fffb7fff0000003fd9004c90d8f600de7f00\
    3fdf9b3900ff908fa08d9e968cf9b9ff0000ed38ff7b00007f003ff9ffffffffffffffa900ff3876\
    000078003ff938ff7b00007f003ff938ff00007bfeffffffffffffed76003f74747474747474d300\
    fb00fb007e7f00d300fb007efb007e7f00d3003e7f007ed300fb007ed300fb007e7f00d300fb017e\
    d300fb007ed300fb007edcd300fb8200fb007e0000e580";
    let hex_c = "\
    7b00387ffff938ff7b80007f003ff9b9ff00fdec38ff7b00007f003ff9ffffffffffffffa900ff38\
    76000078003ff938ff7b00007f003ff938ff00007bfeffffffffffffed76003f74747474747474d3\
    00fb00fb007e7f00d300fb007efb007e7f00d3003e7f007ed300fb007ed300fb007e7f00d300fb01\
    7ed300fb007ed300fb007edcd300fb8200fb007e000000ee7f003f0000007b00387ffff938ff7b80\
    007f003ff9b9ff00fdec38ff7b00007f003ff9ffffffffffffffa900ff3876000078003ff938ff7b\
    00007f003ff938ff00007bfeffffffffffffed76003f74747474747474d300fb00fb007e7f00d300\
    fb007efb007e7f00d3003e7f007ed300fb007ed300fb007e7f00d300fb017ed300fb007ed300fb00\
    7edcd300fb8200fb007e000000ee7f003f000000000000000000000000000000002a000000000000\
    00000000000000000000000000000000000000000000000000000000000000000000000000000000\
    00000000000000000000000000000000000000000000000000000000000000000000000000000000\
    00000000000000000000000000000000000000000000000000000000000000000000000000000000\
    00000000000000000000000000000000000000000000000000000000000000000000000000000000\
    00000000000000000000000000000000000000000000000000000000000000000000000000000000\
    0000000000000000000000df9b3900ff908fa08d9e968c9a0000e7fffb7fff0000003fd9004c90d8\
    f600de7f003fdf9b3900ff908fa08d9e968c9a0000e7fffa7fff0000004005004c90d8f600de908f\
    dcd300fb8200fb007e0000e57f003ff938ff7b00007f003d7ed300fb007ed300fb007ed300fb007e\
    fa00fb007ed300fbf9ffffffffffffffa900ff387600007f003ff938ff7b00007f003ff938fd0000\
    7bfeffffffffffffed76003f74747474747474d300fc";
    // Result produced independently in Python
    let hex_result = "\
    1ebf6415da7ac71a689cd450727b7a361402a1626e0b6cd057e0e2a77d4cb722c1b7d0cbd73a7c07\
    d756813fe97d73d5905c4a26404c7162769ba2dbc1e2742855a1db803e2d2c2fddc77c0598cc70fe\
    066fd4b81cae3e23c55b4795de63acacd1343cf5ad5e715e6919d140c01bab1af1a737ebbf8a7775\
    7602acd611f555ee2d5be56cc14b97c248009cd77490a3dfd6762bae25459a544e369eb4b0cc952a\
    8e6a551ff35a4a7a6e5f5b0b72495c4baadf3a26b9d5d97402ad60fa2324e93adc96ca159b62d147\
    5695f26ff27da100a76e2d273420572e61b4dfbd97e826d9d946f85b87434523f6aa7ce43c443285\
    33f5b5adf32574167b1e9ea3bf6254d6afacf865894907de196285169cfcc1c0fcf438873d13f7e8\
    654acc27c1abb00bec2729e34c994ff2152f60406f75db3ab616541795d9db8ca0b381148de7875f\
    e7a8191407abc390718003698ca28498948caf1dbc3f02593dd85fa929ebae86cfe783d7be473e98\
    0060d9ec60843661cb4cb9b8ddb24bb710f93700b22530501b5ea26c5c94c7370fe0ccbafe0ce7e4\
    cd4f071d0cf0ac151c85a5b132ecaa75793abfb4a6ee33fddd2aa2f5cf2a8eb19c75322792c0d8dc\
    1efb2dcd8ae2b49dd57b84898f531c7f745464f637716151831db56b3e293f587dc95a5e12edfe6b\
    8458033dddf3556da55bef55ba3c3769def0c0f0c86786aca8313dc0ce09118760721eb545d69b46\
    cdb89d377f2c80e67b572da0f75760c2849288a8457c18c6f0b58244b7f95a7567ce23756f1fe359\
    64f7e84fbe28b188157519dd99b8798b076e21984d15c37f41da1309e0fbc539e8b9b09fed36a908\
    28c94f72e7b755c187e58db6bfef0c02309086626ad0fe2efd2ff1467b3de11e057687865f4f85e7\
    0a39bcbc4674dcaded9b04562afe08eb92fbd96ea4a99aa4f9347a075d4421f070ce3a33225f5af1\
    9c27ec5d1720e659ca7fff9686f46b01d76d7de64c738671aaec57ee5582ef7956206fb37c6a36f8\
    8f226ce2124a7f9894a0e9a7aa02001746e6def35699d7adc84a7dcf513ff3da20fd849950f41a5d\
    bb02c91666697156d69ebbe2ef26732b6595d1b6d014a60006d2d3c7055ff9b531779195b8dcd7d9\
    426e776cbc9041735384568ba4adbf7eeea7e0e6cbb47b70335a7ed12a68904eecd334921e4ae6d9\
    c983af20d73215c39573963f03bc87082450cc1c70250e1e8eaa318acaf044a072891fc60324d134\
    6c0a1d02cceb4d4806e536d6017bf6bc125c41694ded38766fea51bfbf7a008ca0b3eb1168766486\
    8aa8469b3e6787a5d5bad6cd67c24005a5cbaa10b63d1b4d05ac42a8b31263052a1260b5900be628\
    4dcab4eb0cf5cda815412ced7bd78f87c00ac3581f41a04352a4a186805a5c9e37b14561a5fc97d2\
    52ca4654fe3d82f42080c21483789cc4b4cbb568f79844f7a317aa2a6555774da26c6f027d3cb0ee\
    9276c6dc4f285fc3b4b9a3cd51c8815cebf110e73c80a9b842cc3b7c80af13f702662b10e868eb61\
    947000b390cd2f3a0899f6f1bab86acf767062f5526507790645ae13b9701ba96b3f873047c9d3b8\
    5e8a5d904a01fbfe10e63495b6021e7cc082aa66679e4d92b3e4e2d62490b44f7e250584cedff0e7\
    072a870ddaa9687a1eae11afc874d83065fb98dbc3cfd90f39517ff3015c71a8c0ab36a6483c7b87\
    f41b2c832fa9428fe95ffba4e49cc553d9e2d33a540958da51588e5120fef6497bfaa96a4dcfc024\
    8170c57f78e9ab9546efbbaf8e9ad6a993493577edd3d29ce8fd9a2e9eb4363b5b472a4ecb2065eb\
    38f876a841af1f227a703248955c8978329dffcd8e065d8da4d42504796ff7abc62832ed86c4f8d0\
    0f55cd567fb9d42524be57ebdacef730c3f94c0372f86fa1b0114f8620f553e4329b2a586fcfeedc\
    af47934909090e14a1f1204e6f1681fb2df05356381e6340f4feaf0787e06218b0b0d8df51acb0bc\
    f98546f33273adf260da959d6fc4a04872122af6508d124abb963c14c30e7c07fee368324921fe33\
    9ae89490c5d6cdae0c356bb6921de95ea13b54e23800";
    let a = &BigUint::from_str_radix(hex_a, 16).unwrap();
    let b = &BigUint::from_str_radix(hex_b, 16).unwrap();
    let c = &BigUint::from_str_radix(hex_c, 16).unwrap();
    let result = BigUint::from_str_radix(hex_result, 16).unwrap();
    // All six evaluation orders must agree (commutativity + associativity).
    assert_eq!(a * b * c, result);
    assert_eq!(a * c * b, result);
    assert_eq!(b * a * c, result);
    assert_eq!(b * c * a, result);
    assert_eq!(c * a * b, result);
    assert_eq!(c * b * a, result);
}

78
vendor/num-bigint/tests/macros/mod.rs vendored Normal file
View File

@@ -0,0 +1,78 @@
#![allow(unused)]
/// Verify that a binary operator yields `$expected` for every
/// combination of borrowed and owned operands.
macro_rules! assert_op {
    ($lhs:ident $op:tt $rhs:ident == $expected:expr) => {
        // ref op ref
        assert_eq!((&$lhs) $op (&$rhs), $expected);
        // ref op val
        assert_eq!((&$lhs) $op $rhs.clone(), $expected);
        // val op ref
        assert_eq!($lhs.clone() $op (&$rhs), $expected);
        // val op val
        assert_eq!($lhs.clone() $op $rhs.clone(), $expected);
    };
}
/// Verify that a compound-assignment operator yields `$expected`
/// whether the right-hand side is borrowed or owned.
macro_rules! assert_assign_op {
    ($lhs:ident $op:tt $rhs:ident == $expected:expr) => {{
        // assign from a reference
        let mut acc = $lhs.clone();
        assert_eq!({ acc $op &$rhs; acc }, $expected);
        // assign from an owned value
        let mut acc = $lhs.clone();
        assert_eq!({ acc $op $rhs.clone(); acc }, $expected);
    }};
}
/// Verify an op with a scalar on either side: every conversion in the
/// list is attempted, and each operand that converts successfully is
/// replaced by the resulting primitive and re-checked via `assert_op!`.
macro_rules! assert_scalar_op {
    (($($to:ident),*) $lhs:ident $op:tt $rhs:ident == $expected:expr) => {
        $(
            // scalar on the left-hand side
            if let Some(lhs) = $lhs.$to() {
                assert_op!(lhs $op $rhs == $expected);
            }
            // scalar on the right-hand side
            if let Some(rhs) = $rhs.$to() {
                assert_op!($lhs $op rhs == $expected);
            }
        )*
    };
}
/// Run `assert_scalar_op!` across every unsigned primitive conversion.
macro_rules! assert_unsigned_scalar_op {
    ($lhs:ident $op:tt $rhs:ident == $expected:expr) => {
        assert_scalar_op!((to_u8, to_u16, to_u32, to_u64, to_usize, to_u128)
                          $lhs $op $rhs == $expected);
    };
}
/// Run `assert_scalar_op!` across every unsigned and signed primitive
/// conversion.
macro_rules! assert_signed_scalar_op {
    ($lhs:ident $op:tt $rhs:ident == $expected:expr) => {
        assert_scalar_op!((to_u8, to_u16, to_u32, to_u64, to_usize, to_u128,
                           to_i8, to_i16, to_i32, to_i64, to_isize, to_i128)
                          $lhs $op $rhs == $expected);
    };
}
/// Verify a compound assignment whose right-hand side is a scalar:
/// every conversion in the list that succeeds supplies the primitive RHS.
macro_rules! assert_scalar_assign_op {
    (($($to:ident),*) $lhs:ident $op:tt $rhs:ident == $expected:expr) => {
        $(
            // Only the right-hand side can be a primitive for assign-ops.
            if let Some(rhs) = $rhs.$to() {
                let mut acc = $lhs.clone();
                assert_eq!({ acc $op rhs; acc }, $expected);
            }
        )*
    };
}
/// Run `assert_scalar_assign_op!` across every unsigned primitive
/// conversion.
macro_rules! assert_unsigned_scalar_assign_op {
    ($lhs:ident $op:tt $rhs:ident == $expected:expr) => {
        assert_scalar_assign_op!((to_u8, to_u16, to_u32, to_u64, to_usize, to_u128)
                                 $lhs $op $rhs == $expected);
    };
}
/// Run `assert_scalar_assign_op!` across every unsigned and signed
/// primitive conversion.
macro_rules! assert_signed_scalar_assign_op {
    ($lhs:ident $op:tt $rhs:ident == $expected:expr) => {
        assert_scalar_assign_op!((to_u8, to_u16, to_u32, to_u64, to_usize, to_u128,
                                  to_i8, to_i16, to_i32, to_i64, to_isize, to_i128)
                                 $lhs $op $rhs == $expected);
    };
}

181
vendor/num-bigint/tests/modpow.rs vendored Normal file
View File

@@ -0,0 +1,181 @@
// Base operand `b` for the `test_modpow_big` tests below
// (underscore-grouped hex, parsed with `from_str_radix(_, 16)`).
static BIG_B: &str = "\
efac3c0a_0de55551_fee0bfe4_67fa017a_1a898fa1_6ca57cb1\
ca9e3248_cacc09a9_b99d6abc_38418d0f_82ae4238_d9a68832\
aadec7c1_ac5fed48_7a56a71b_67ac59d5_afb28022_20d9592d\
247c4efc_abbd9b75_586088ee_1dc00dc4_232a8e15_6e8191dd\
675b6ae0_c80f5164_752940bc_284b7cee_885c1e10_e495345b\
8fbe9cfd_e5233fe1_19459d0b_d64be53c_27de5a02_a829976b\
33096862_82dad291_bd38b6a9_be396646_ddaf8039_a2573c39\
1b14e8bc_2cb53e48_298c047e_d9879e9c_5a521076_f0e27df3\
990e1659_d3d8205b_6443ebc0_9918ebee_6764f668_9f2b2be3\
b59cbc76_d76d0dfc_d737c3ec_0ccf9c00_ad0554bf_17e776ad\
b4edf9cc_6ce540be_76229093_5c53893b";
// Exponent operand `e` for the `test_modpow_big` tests below.
static BIG_E: &str = "\
be0e6ea6_08746133_e0fbc1bf_82dba91e_e2b56231_a81888d2\
a833a1fc_f7ff002a_3c486a13_4f420bf3_a5435be9_1a5c8391\
774d6e6c_085d8357_b0c97d4d_2bb33f7c_34c68059_f78d2541\
eacc8832_426f1816_d3be001e_b69f9242_51c7708e_e10efe98\
449c9a4a_b55a0f23_9d797410_515da00d_3ea07970_4478a2ca\
c3d5043c_bd9be1b4_6dce479d_4302d344_84a939e6_0ab5ada7\
12ae34b2_30cc473c_9f8ee69d_2cac5970_29f5bf18_bc8203e4\
f3e895a2_13c94f1e_24c73d77_e517e801_53661fdd_a2ce9e47\
a73dd7f8_2f2adb1e_3f136bf7_8ae5f3b8_08730de1_a4eff678\
e77a06d0_19a522eb_cbefba2a_9caf7736_b157c5c6_2d192591\
17946850_2ddb1822_117b68a0_32f7db88";
// Modulus `m` for the `test_modpow_big` tests below.
// This modulus is the prime from the 2048-bit MODP DH group:
// https://tools.ietf.org/html/rfc3526#section-3
static BIG_M: &str = "\
FFFFFFFF_FFFFFFFF_C90FDAA2_2168C234_C4C6628B_80DC1CD1\
29024E08_8A67CC74_020BBEA6_3B139B22_514A0879_8E3404DD\
EF9519B3_CD3A431B_302B0A6D_F25F1437_4FE1356D_6D51C245\
E485B576_625E7EC6_F44C42E9_A637ED6B_0BFF5CB6_F406B7ED\
EE386BFB_5A899FA5_AE9F2411_7C4B1FE6_49286651_ECE45B3D\
C2007CB8_A163BF05_98DA4836_1C55D39A_69163FA8_FD24CF5F\
83655D23_DCA3AD96_1C62F356_208552BB_9ED52907_7096966D\
670C354E_4ABC9804_F1746C08_CA18217C_32905E46_2E36CE3B\
E39E772C_180E8603_9B2783A2_EC07A28F_B5C55DF0_6F4C52C9\
DE2BCBF6_95581718_3995497C_EA956AE5_15D22618_98FA0510\
15728E5A_8AACAA68_FFFFFFFF_FFFFFFFF";
// Expected result `r = b^e mod m` for the operands above, as asserted
// by `test_modpow_big` in both modules below.
static BIG_R: &str = "\
a1468311_6e56edc9_7a98228b_5e924776_0dd7836e_caabac13\
eda5373b_4752aa65_a1454850_40dc770e_30aa8675_6be7d3a8\
9d3085e4_da5155cf_b451ef62_54d0da61_cf2b2c87_f495e096\
055309f7_77802bbb_37271ba8_1313f1b5_075c75d1_024b6c77\
fdb56f17_b05bce61_e527ebfd_2ee86860_e9907066_edd526e7\
93d289bf_6726b293_41b0de24_eff82424_8dfd374b_4ec59542\
35ced2b2_6b195c90_10042ffb_8f58ce21_bc10ec42_64fda779\
d352d234_3d4eaea6_a86111ad_a37e9555_43ca78ce_2885bed7\
5a30d182_f1cf6834_dc5b6e27_1a41ac34_a2e91e11_33363ff0\
f88a7b04_900227c9_f6e6d06b_7856b4bb_4e354d61_060db6c8\
109c4735_6e7db425_7b5d74c7_0b709508";
mod biguint {
    use num_bigint::BigUint;
    use num_integer::Integer;
    use num_traits::Num;

    /// Assert that `b.modpow(&e, &m) == r`, then repeat the computation
    /// with the doubled (hence even) modulus `2m` and confirm the result
    /// stays in range and is still congruent to `r` modulo `m`.
    fn check_modpow<T: Into<BigUint>>(b: T, e: T, m: T, r: T) {
        let base: BigUint = b.into();
        let exponent: BigUint = e.into();
        let modulus: BigUint = m.into();
        let expected: BigUint = r.into();

        assert_eq!(base.modpow(&exponent, &modulus), expected);

        // An even modulus exercises a different path inside `modpow`.
        let even_m = &modulus << 1;
        let even_modpow = base.modpow(&exponent, &even_m);
        assert!(even_modpow < even_m);
        assert_eq!(even_modpow.mod_floor(&modulus), expected);
    }

    #[test]
    fn test_modpow_single() {
        // (base, exponent, modulus, expected)
        check_modpow::<u32>(1, 0, 11, 1);
        check_modpow::<u32>(0, 15, 11, 0);
        check_modpow::<u32>(3, 7, 11, 9);
        check_modpow::<u32>(5, 117, 19, 1);
        check_modpow::<u32>(20, 1, 2, 0);
        check_modpow::<u32>(20, 1, 3, 2);
    }

    #[test]
    fn test_modpow_small() {
        // Exhaustively cross-check small cases against plain u64 math.
        for base in 0u64..11 {
            for exp in 0u64..11 {
                for modulus in 1..11 {
                    check_modpow::<u64>(base, exp, modulus, base.pow(exp as u32) % modulus);
                }
            }
        }
    }

    #[test]
    fn test_modpow_big() {
        let base = BigUint::from_str_radix(super::BIG_B, 16).unwrap();
        let exponent = BigUint::from_str_radix(super::BIG_E, 16).unwrap();
        let modulus = BigUint::from_str_radix(super::BIG_M, 16).unwrap();
        let expected = BigUint::from_str_radix(super::BIG_R, 16).unwrap();

        assert_eq!(base.modpow(&exponent, &modulus), expected);

        // Same computation with the even modulus `2m`.
        let even_m = &modulus << 1;
        let even_modpow = base.modpow(&exponent, &even_m);
        assert!(even_modpow < even_m);
        assert_eq!(even_modpow % modulus, expected);
    }
}
mod bigint {
    use num_bigint::BigInt;
    use num_integer::Integer;
    use num_traits::{Num, One, Signed};

    /// Assert `b.modpow(&e, &m) == r` for every sign combination of the
    /// base and the modulus. Expected residues for the negated variants
    /// are derived with `mod_floor` semantics, i.e. the result's sign
    /// follows the modulus.
    fn check_modpow<T: Into<BigInt>>(b: T, e: T, m: T, r: T) {
        fn check(b: &BigInt, e: &BigInt, m: &BigInt, r: &BigInt) {
            assert_eq!(&b.modpow(e, m), r, "{} ** {} (mod {}) != {}", b, e, m, r);
            let even_m = m << 1u8;
            // Fix: reduce by the doubled (even) modulus. The original code
            // passed `m` here, so the even-modulus path was never exercised
            // and the two assertions below were vacuously satisfied. The
            // BigUint counterpart above already used `even_m`.
            let even_modpow = b.modpow(e, &even_m);
            assert!(even_modpow.abs() < even_m.abs());
            // Since m divides 2m, the result still reduces to `r` mod `m`.
            assert_eq!(&even_modpow.mod_floor(m), r);
            // the sign of the result follows the modulus like `mod_floor`, not `rem`
            assert_eq!(b.modpow(&BigInt::one(), m), b.mod_floor(m));
        }
        let b: BigInt = b.into();
        let e: BigInt = e.into();
        let m: BigInt = m.into();
        let r: BigInt = r.into();
        // Negating the base flips the residue only for odd exponents.
        let neg_b_r = if e.is_odd() {
            (-&r).mod_floor(&m)
        } else {
            r.clone()
        };
        // A negative modulus changes only which canonical residue is chosen.
        let neg_m_r = r.mod_floor(&-&m);
        let neg_bm_r = neg_b_r.mod_floor(&-&m);
        check(&b, &e, &m, &r);
        check(&-&b, &e, &m, &neg_b_r);
        check(&b, &e, &-&m, &neg_m_r);
        check(&-b, &e, &-&m, &neg_bm_r);
    }

    #[test]
    fn test_modpow() {
        check_modpow(1, 0, 11, 1);
        check_modpow(0, 15, 11, 0);
        check_modpow(3, 7, 11, 9);
        check_modpow(5, 117, 19, 1);
        check_modpow(-20, 1, 2, 0);
        check_modpow(-20, 1, 3, 1);
    }

    #[test]
    fn test_modpow_small() {
        // Exhaustive small cases, skipping the undefined zero modulus.
        for b in -10i64..11 {
            for e in 0i64..11 {
                for m in -10..11 {
                    if m == 0 {
                        continue;
                    }
                    check_modpow(b, e, m, b.pow(e as u32).mod_floor(&m));
                }
            }
        }
    }

    #[test]
    fn test_modpow_big() {
        let b = BigInt::from_str_radix(super::BIG_B, 16).unwrap();
        let e = BigInt::from_str_radix(super::BIG_E, 16).unwrap();
        let m = BigInt::from_str_radix(super::BIG_M, 16).unwrap();
        let r = BigInt::from_str_radix(super::BIG_R, 16).unwrap();
        check_modpow(b, e, m, r);
    }
}

159
vendor/num-bigint/tests/roots.rs vendored Normal file
View File

@@ -0,0 +1,159 @@
mod biguint {
    use num_bigint::BigUint;
    use num_traits::{One, Zero};

    /// Verify the floor-root contract of `x.nth_root(n)`:
    /// `root^n <= x < (root + 1)^n`, agreement with `sqrt`/`cbrt`, and
    /// correct roots for the neighbouring perfect powers.
    fn check<T: Into<BigUint>>(x: T, n: u32) {
        let x: BigUint = x.into();
        let root = x.nth_root(n);
        println!("check {}.nth_root({}) = {}", x, n, root);

        // The dedicated square/cube helpers must agree.
        match n {
            2 => assert_eq!(root, x.sqrt()),
            3 => assert_eq!(root, x.cbrt()),
            _ => {}
        }

        // `root^n` is the largest perfect n-th power not exceeding `x`.
        let lo = root.pow(n);
        assert!(lo <= x);
        assert_eq!(lo.nth_root(n), root);
        if !lo.is_zero() {
            assert_eq!((&lo - 1u32).nth_root(n), &root - 1u32);
        }

        // `(root + 1)^n` is the smallest perfect n-th power exceeding `x`.
        let hi = (&root + 1u32).pow(n);
        assert!(hi > x);
        assert_eq!(hi.nth_root(n), &root + 1u32);
        assert_eq!((&hi - 1u32).nth_root(n), root);
    }

    #[test]
    fn test_sqrt() {
        for &x in &[99u32, 100, 120] {
            check(x, 2);
        }
    }

    #[test]
    fn test_cbrt() {
        for &x in &[8u32, 26] {
            check(x, 3);
        }
    }

    #[test]
    fn test_nth_root() {
        check(0u32, 1);
        check(10u32, 1);
        check(100u32, 4);
    }

    #[test]
    #[should_panic]
    fn test_nth_root_n_is_zero() {
        // The zeroth root is undefined and must panic.
        check(4u32, 0);
    }

    #[test]
    fn test_nth_root_big() {
        let x = BigUint::from(123_456_789_u32);
        assert_eq!(x.nth_root(10), BigUint::from(6u32));
        check(x, 10);
    }

    #[test]
    fn test_nth_root_googol() {
        let googol = BigUint::from(10u32).pow(100u32);
        // perfect divisors of 100
        for &n in &[2, 4, 5, 10, 20, 25, 50, 100] {
            assert_eq!(googol.nth_root(n), BigUint::from(10u32).pow(100u32 / n));
            check(googol.clone(), n);
        }
    }

    #[test]
    fn test_nth_root_twos() {
        const EXP: u32 = 12;
        const LOG2: usize = 1 << EXP;
        let x = BigUint::one() << LOG2;

        // the perfect divisors are just powers of two
        for shift in 1..=EXP {
            let n = 2u32.pow(shift);
            assert_eq!(x.nth_root(n), BigUint::one() << (LOG2 / n as usize));
            check(x.clone(), n);
        }

        // degenerate cases should return quickly
        assert!(x.nth_root(x.bits() as u32).is_one());
        assert!(x.nth_root(i32::MAX as u32).is_one());
        assert!(x.nth_root(u32::MAX).is_one());
    }

    #[test]
    fn test_roots_rand1() {
        // A random input that found regressions
        let s = "575981506858479247661989091587544744717244516135539456183849\
                 986593934723426343633698413178771587697273822147578889823552\
                 182702908597782734558103025298880194023243541613924361007059\
                 353344183590348785832467726433749431093350684849462759540710\
                 026019022227591412417064179299354183441181373862905039254106\
                 4781867";
        let x: BigUint = s.parse().unwrap();
        for &n in &[2u32, 3, 10, 100] {
            check(x.clone(), n);
        }
    }
}
mod bigint {
    use num_bigint::BigInt;
    use num_traits::Signed;

    /// Verify the signed floor-root contract of `x.nth_root(n)` and its
    /// agreement with `sqrt`/`cbrt` for n == 2 and n == 3.
    fn check(x: i64, n: u32) {
        let value = BigInt::from(x);
        let root = value.nth_root(n);
        match n {
            2 => assert_eq!(&root, &value.sqrt()),
            3 => assert_eq!(&root, &value.cbrt()),
            _ => {}
        }
        if value.is_negative() {
            // Negative input: root^n >= x > (root - 1)^n.
            assert!(root.pow(n) >= value);
            assert!((root - 1u32).pow(n) < value);
        } else {
            // Non-negative input: root^n <= x < (root + 1)^n.
            assert!(root.pow(n) <= value);
            assert!((root + 1u32).pow(n) > value);
        }
    }

    #[test]
    fn test_nth_root() {
        check(-100, 3);
    }

    #[test]
    #[should_panic]
    fn test_nth_root_x_neg_n_even() {
        // An even root of a negative number is undefined and must panic.
        check(-100, 4);
    }

    #[test]
    #[should_panic]
    fn test_sqrt_x_neg() {
        // The square root of a negative number is undefined and must panic.
        check(-4, 2);
    }

    #[test]
    fn test_cbrt() {
        for &x in &[8i64, -8] {
            check(x, 3);
        }
    }
}