You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@teaclave.apache.org by hs...@apache.org on 2023/06/08 09:22:14 UTC

[incubator-teaclave-crates] branch main updated: Add rayon and tantivy

This is an automated email from the ASF dual-hosted git repository.

hsun pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/incubator-teaclave-crates.git


The following commit(s) were added to refs/heads/main by this push:
     new c979a7a  Add rayon and tantivy
c979a7a is described below

commit c979a7afdb748716696ce683a250fbb50a98a23f
Author: sunhe05 <su...@baidu.com>
AuthorDate: Thu Jun 8 09:18:27 2023 +0000

    Add rayon and tantivy
---
 README.md                                          |      2 +
 rayon/.github/workflows/ci.yaml                    |    100 +
 rayon/.github/workflows/master.yaml                |     27 +
 rayon/.github/workflows/pr.yaml                    |     41 +
 rayon/.gitignore                                   |      6 +
 rayon/Cargo.toml                                   |     31 +
 rayon/FAQ.md                                       |    227 +
 rayon/LICENSE-APACHE                               |    201 +
 rayon/LICENSE-MIT                                  |     25 +
 rayon/README.md                                    |    144 +
 rayon/RELEASES.md                                  |    862 +
 rayon/bors.toml                                    |     17 +
 rayon/ci/alt-core/Cargo.toml                       |     10 +
 rayon/ci/alt-core/build.rs                         |      1 +
 rayon/ci/alt-core/src/lib.rs                       |      0
 rayon/ci/compat-Cargo.lock                         |   1781 +
 rayon/ci/highlander.sh                             |     12 +
 rayon/ci/highlander/Cargo.toml                     |     17 +
 rayon/ci/highlander/src/main.rs                    |      1 +
 rayon/rayon-core/Cargo.toml                        |     57 +
 rayon/rayon-core/LICENSE-APACHE                    |    201 +
 rayon/rayon-core/LICENSE-MIT                       |     25 +
 rayon/rayon-core/README.md                         |     11 +
 rayon/rayon-core/build.rs                          |      7 +
 rayon/rayon-core/src/broadcast/mod.rs              |    151 +
 rayon/rayon-core/src/broadcast/test.rs             |    262 +
 rayon/rayon-core/src/compile_fail/mod.rs           |      7 +
 .../rayon-core/src/compile_fail/quicksort_race1.rs |     28 +
 .../rayon-core/src/compile_fail/quicksort_race2.rs |     28 +
 .../rayon-core/src/compile_fail/quicksort_race3.rs |     28 +
 rayon/rayon-core/src/compile_fail/rc_return.rs     |     17 +
 rayon/rayon-core/src/compile_fail/rc_upvar.rs      |      9 +
 .../rayon-core/src/compile_fail/scope_join_bad.rs  |     24 +
 rayon/rayon-core/src/job.rs                        |    270 +
 rayon/rayon-core/src/join/mod.rs                   |    188 +
 rayon/rayon-core/src/join/test.rs                  |    151 +
 rayon/rayon-core/src/latch.rs                      |    414 +
 rayon/rayon-core/src/lib.rs                        |    841 +
 rayon/rayon-core/src/log.rs                        |    421 +
 rayon/rayon-core/src/private.rs                    |     26 +
 rayon/rayon-core/src/registry.rs                   |   1029 +
 rayon/rayon-core/src/scope/mod.rs                  |    865 +
 rayon/rayon-core/src/scope/test.rs                 |    619 +
 rayon/rayon-core/src/sleep/README.md               |    219 +
 rayon/rayon-core/src/sleep/counters.rs             |    277 +
 rayon/rayon-core/src/sleep/mod.rs                  |    394 +
 rayon/rayon-core/src/spawn/mod.rs                  |    163 +
 rayon/rayon-core/src/spawn/test.rs                 |    255 +
 rayon/rayon-core/src/test.rs                       |    200 +
 rayon/rayon-core/src/thread_pool/mod.rs            |    471 +
 rayon/rayon-core/src/thread_pool/test.rs           |    418 +
 rayon/rayon-core/src/unwind.rs                     |     31 +
 rayon/rayon-core/tests/double_init_fail.rs         |     15 +
 rayon/rayon-core/tests/init_zero_threads.rs        |     10 +
 rayon/rayon-core/tests/scope_join.rs               |     45 +
 rayon/rayon-core/tests/scoped_threadpool.rs        |     99 +
 rayon/rayon-core/tests/simple_panic.rs             |      7 +
 rayon/rayon-core/tests/stack_overflow_crash.rs     |     97 +
 rayon/rayon-demo/Cargo.toml                        |     32 +
 rayon/rayon-demo/data/README.md                    |      2 +
 rayon/rayon-demo/data/tsp/README.md                |      9 +
 rayon/rayon-demo/data/tsp/dj10.tsp                 |     16 +
 rayon/rayon-demo/data/tsp/dj15.tsp                 |     21 +
 rayon/rayon-demo/data/tsp/dj38.tsp                 |     48 +
 rayon/rayon-demo/examples/README.md                |      3 +
 rayon/rayon-demo/examples/cpu_monitor.rs           |     81 +
 rayon/rayon-demo/src/cpu_time/mod.rs               |     54 +
 rayon/rayon-demo/src/cpu_time/unix.rs              |     17 +
 rayon/rayon-demo/src/cpu_time/win.rs               |     26 +
 rayon/rayon-demo/src/factorial/mod.rs              |     96 +
 rayon/rayon-demo/src/fibonacci/mod.rs              |    121 +
 rayon/rayon-demo/src/find/mod.rs                   |     92 +
 rayon/rayon-demo/src/join_microbench.rs            |     71 +
 rayon/rayon-demo/src/lib.rs                        |      3 +
 rayon/rayon-demo/src/life/bench.rs                 |     16 +
 rayon/rayon-demo/src/life/mod.rs                   |    312 +
 rayon/rayon-demo/src/main.rs                       |     98 +
 rayon/rayon-demo/src/map_collect.rs                |    332 +
 rayon/rayon-demo/src/matmul/bench.rs               |     13 +
 rayon/rayon-demo/src/matmul/mod.rs                 |    423 +
 rayon/rayon-demo/src/mergesort/bench.rs            |     23 +
 rayon/rayon-demo/src/mergesort/mod.rs              |    272 +
 rayon/rayon-demo/src/nbody/bench.rs                |     47 +
 rayon/rayon-demo/src/nbody/mod.rs                  |    142 +
 rayon/rayon-demo/src/nbody/nbody.rs                |    474 +
 rayon/rayon-demo/src/nbody/visualize.rs            |    210 +
 rayon/rayon-demo/src/noop/mod.rs                   |     36 +
 rayon/rayon-demo/src/pythagoras/mod.rs             |    104 +
 rayon/rayon-demo/src/quicksort/bench.rs            |     46 +
 rayon/rayon-demo/src/quicksort/mod.rs              |    147 +
 rayon/rayon-demo/src/sieve/bench.rs                |     28 +
 rayon/rayon-demo/src/sieve/mod.rs                  |    207 +
 rayon/rayon-demo/src/sort.rs                       |    291 +
 rayon/rayon-demo/src/str_split.rs                  |     65 +
 rayon/rayon-demo/src/tsp/bench.rs                  |     54 +
 rayon/rayon-demo/src/tsp/graph.rs                  |    114 +
 rayon/rayon-demo/src/tsp/mod.rs                    |    113 +
 rayon/rayon-demo/src/tsp/parser.rs                 |    228 +
 rayon/rayon-demo/src/tsp/solver.rs                 |    108 +
 rayon/rayon-demo/src/tsp/step.rs                   |    186 +
 rayon/rayon-demo/src/tsp/tour.rs                   |     62 +
 rayon/rayon-demo/src/tsp/weight.rs                 |     70 +
 rayon/rayon-demo/src/vec_collect.rs                |    229 +
 rayon/scripts/analyze.sh                           |     30 +
 rayon/src/array.rs                                 |     85 +
 rayon/src/collections/binary_heap.rs               |    120 +
 rayon/src/collections/btree_map.rs                 |     66 +
 rayon/src/collections/btree_set.rs                 |     52 +
 rayon/src/collections/hash_map.rs                  |     96 +
 rayon/src/collections/hash_set.rs                  |     80 +
 rayon/src/collections/linked_list.rs               |     66 +
 rayon/src/collections/mod.rs                       |     84 +
 rayon/src/collections/vec_deque.rs                 |    159 +
 .../compile_fail/cannot_collect_filtermap_data.rs  |     14 +
 rayon/src/compile_fail/cannot_zip_filtered_data.rs |     14 +
 rayon/src/compile_fail/cell_par_iter.rs            |     13 +
 rayon/src/compile_fail/mod.rs                      |      7 +
 rayon/src/compile_fail/must_use.rs                 |     69 +
 rayon/src/compile_fail/no_send_par_iter.rs         |     58 +
 rayon/src/compile_fail/rc_par_iter.rs              |     15 +
 rayon/src/delegate.rs                              |    109 +
 rayon/src/iter/chain.rs                            |    268 +
 rayon/src/iter/chunks.rs                           |    226 +
 rayon/src/iter/cloned.rs                           |    223 +
 rayon/src/iter/collect/consumer.rs                 |    186 +
 rayon/src/iter/collect/mod.rs                      |    116 +
 rayon/src/iter/collect/test.rs                     |    373 +
 rayon/src/iter/copied.rs                           |    223 +
 rayon/src/iter/empty.rs                            |    104 +
 rayon/src/iter/enumerate.rs                        |    133 +
 rayon/src/iter/extend.rs                           |    604 +
 rayon/src/iter/filter.rs                           |    141 +
 rayon/src/iter/filter_map.rs                       |    142 +
 rayon/src/iter/find.rs                             |    120 +
 rayon/src/iter/find_first_last/mod.rs              |    238 +
 rayon/src/iter/find_first_last/test.rs             |    106 +
 rayon/src/iter/flat_map.rs                         |    154 +
 rayon/src/iter/flat_map_iter.rs                    |    147 +
 rayon/src/iter/flatten.rs                          |    140 +
 rayon/src/iter/flatten_iter.rs                     |    132 +
 rayon/src/iter/fold.rs                             |    302 +
 rayon/src/iter/fold_chunks.rs                      |    236 +
 rayon/src/iter/fold_chunks_with.rs                 |    231 +
 rayon/src/iter/for_each.rs                         |     77 +
 rayon/src/iter/from_par_iter.rs                    |    228 +
 rayon/src/iter/inspect.rs                          |    257 +
 rayon/src/iter/interleave.rs                       |    336 +
 rayon/src/iter/interleave_shortest.rs              |     85 +
 rayon/src/iter/intersperse.rs                      |    410 +
 rayon/src/iter/len.rs                              |    271 +
 rayon/src/iter/map.rs                              |    259 +
 rayon/src/iter/map_with.rs                         |    573 +
 rayon/src/iter/mod.rs                              |   3531 +
 rayon/src/iter/multizip.rs                         |    338 +
 rayon/src/iter/noop.rs                             |     59 +
 rayon/src/iter/once.rs                             |     68 +
 rayon/src/iter/panic_fuse.rs                       |    342 +
 rayon/src/iter/par_bridge.rs                       |    162 +
 rayon/src/iter/plumbing/README.md                  |    315 +
 rayon/src/iter/plumbing/mod.rs                     |    484 +
 rayon/src/iter/positions.rs                        |    137 +
 rayon/src/iter/product.rs                          |    114 +
 rayon/src/iter/reduce.rs                           |    116 +
 rayon/src/iter/repeat.rs                           |    241 +
 rayon/src/iter/rev.rs                              |    123 +
 rayon/src/iter/skip.rs                             |     95 +
 rayon/src/iter/skip_any.rs                         |    144 +
 rayon/src/iter/skip_any_while.rs                   |    166 +
 rayon/src/iter/splitter.rs                         |    174 +
 rayon/src/iter/step_by.rs                          |    143 +
 rayon/src/iter/sum.rs                              |    110 +
 rayon/src/iter/take.rs                             |     86 +
 rayon/src/iter/take_any.rs                         |    144 +
 rayon/src/iter/take_any_while.rs                   |    166 +
 rayon/src/iter/test.rs                             |   2188 +
 rayon/src/iter/try_fold.rs                         |    298 +
 rayon/src/iter/try_reduce.rs                       |    131 +
 rayon/src/iter/try_reduce_with.rs                  |    132 +
 rayon/src/iter/unzip.rs                            |    525 +
 rayon/src/iter/update.rs                           |    327 +
 rayon/src/iter/while_some.rs                       |    154 +
 rayon/src/iter/zip.rs                              |    159 +
 rayon/src/iter/zip_eq.rs                           |     72 +
 rayon/src/lib.rs                                   |    160 +
 rayon/src/math.rs                                  |     54 +
 rayon/src/option.rs                                |    203 +
 rayon/src/par_either.rs                            |     74 +
 rayon/src/prelude.rs                               |     17 +
 rayon/src/private.rs                               |     26 +
 rayon/src/range.rs                                 |    462 +
 rayon/src/range_inclusive.rs                       |    386 +
 rayon/src/result.rs                                |    132 +
 rayon/src/slice/chunks.rs                          |    389 +
 rayon/src/slice/mergesort.rs                       |    755 +
 rayon/src/slice/mod.rs                             |   1041 +
 rayon/src/slice/quicksort.rs                       |    903 +
 rayon/src/slice/rchunks.rs                         |    386 +
 rayon/src/slice/test.rs                            |    170 +
 rayon/src/split_producer.rs                        |    132 +
 rayon/src/str.rs                                   |    848 +
 rayon/src/string.rs                                |     48 +
 rayon/src/vec.rs                                   |    283 +
 rayon/tests/chars.rs                               |     39 +
 rayon/tests/clones.rs                              |    216 +
 rayon/tests/collect.rs                             |    113 +
 rayon/tests/cross-pool.rs                          |     22 +
 rayon/tests/debug.rs                               |    225 +
 rayon/tests/drain_vec.rs                           |     41 +
 rayon/tests/intersperse.rs                         |     60 +
 rayon/tests/issue671-unzip.rs                      |     17 +
 rayon/tests/issue671.rs                            |     16 +
 rayon/tests/iter_panic.rs                          |     53 +
 rayon/tests/named-threads.rs                       |     25 +
 rayon/tests/octillion.rs                           |    156 +
 rayon/tests/par_bridge_recursion.rs                |     31 +
 rayon/tests/producer_split_at.rs                   |    394 +
 rayon/tests/sort-panic-safe.rs                     |    164 +
 rayon/tests/str.rs                                 |    134 +
 tantivy/.github/FUNDING.yml                        |     12 +
 tantivy/.github/ISSUE_TEMPLATE/actions.md          |     13 +
 tantivy/.github/ISSUE_TEMPLATE/bug_report.md       |     19 +
 tantivy/.github/ISSUE_TEMPLATE/feature_request.md  |     14 +
 tantivy/.github/ISSUE_TEMPLATE/question.md         |      7 +
 tantivy/.github/dependabot.yml                     |     15 +
 tantivy/.github/workflows/coverage.yml             |     26 +
 tantivy/.github/workflows/long_running.yml         |     28 +
 tantivy/.github/workflows/test.yml                 |     74 +
 tantivy/.gitignore                                 |     15 +
 tantivy/ARCHITECTURE.md                            |    295 +
 tantivy/AUTHORS                                    |     11 +
 tantivy/CHANGELOG.md                               |    532 +
 tantivy/Cargo.toml                                 |    131 +
 tantivy/LICENSE                                    |      7 +
 tantivy/Makefile                                   |      6 +
 tantivy/README.md                                  |    173 +
 tantivy/appveyor.yml                               |     23 +
 tantivy/benches/alice.txt                          |   3774 +
 tantivy/benches/analyzer.rs                        |     22 +
 tantivy/benches/hdfs.json                          | 100000 ++++++++++++++++++
 tantivy/benches/index-bench.rs                     |    121 +
 tantivy/bitpacker/Cargo.toml                       |     17 +
 tantivy/bitpacker/benches/bench.rs                 |     35 +
 tantivy/bitpacker/src/bitpacker.rs                 |    145 +
 tantivy/bitpacker/src/blocked_bitpacker.rs         |    179 +
 tantivy/bitpacker/src/lib.rs                       |     80 +
 tantivy/ci/before_deploy.ps1                       |     23 +
 tantivy/ci/before_deploy.sh                        |     33 +
 tantivy/ci/install.sh                              |     47 +
 tantivy/ci/script.sh                               |     30 +
 tantivy/common/Cargo.toml                          |     21 +
 tantivy/common/src/bitset.rs                       |    737 +
 tantivy/common/src/lib.rs                          |    171 +
 tantivy/common/src/serialize.rs                    |    308 +
 tantivy/common/src/vint.rs                         |    323 +
 tantivy/common/src/writer.rs                       |    114 +
 tantivy/doc/.gitignore                             |      1 +
 tantivy/doc/assets/images/Nuclia.png               |    Bin 0 -> 3196 bytes
 tantivy/doc/assets/images/element-dark-theme.png   |    Bin 0 -> 56831 bytes
 tantivy/doc/assets/images/element.io.svg           |      8 +
 tantivy/doc/assets/images/etsy.png                 |    Bin 0 -> 87274 bytes
 .../doc/assets/images/humanfirst.ai-dark-theme.png |    Bin 0 -> 23167 bytes
 tantivy/doc/assets/images/humanfirst.png           |    Bin 0 -> 104353 bytes
 tantivy/doc/assets/images/nuclia-dark-theme.png    |    Bin 0 -> 8008 bytes
 tantivy/doc/assets/images/searchbenchmark.png      |    Bin 0 -> 668993 bytes
 tantivy/doc/book.toml                              |      5 +
 tantivy/doc/src/SUMMARY.md                         |     14 +
 tantivy/doc/src/avant-propos.md                    |     34 +
 tantivy/doc/src/basis.md                           |     68 +
 tantivy/doc/src/best_practise.md.rs                |      0
 tantivy/doc/src/examples.md                        |      3 +
 tantivy/doc/src/facetting.md                       |      5 +
 tantivy/doc/src/faq.md                             |      0
 tantivy/doc/src/index_sorting.md                   |     62 +
 tantivy/doc/src/innerworkings.md                   |      1 +
 tantivy/doc/src/inverted_index.md                  |      1 +
 tantivy/doc/src/json.md                            |    130 +
 tantivy/doc/src/schema.md                          |      1 +
 tantivy/examples/aggregation.rs                    |    130 +
 tantivy/examples/basic_search.rs                   |    225 +
 tantivy/examples/custom_collector.rs               |    181 +
 tantivy/examples/custom_tokenizer.rs               |    111 +
 tantivy/examples/date_time_field.rs                |     69 +
 tantivy/examples/deleting_updating_documents.rs    |    144 +
 tantivy/examples/faceted_search.rs                 |    112 +
 .../examples/faceted_search_with_tweaked_score.rs  |     98 +
 tantivy/examples/integer_range_search.rs           |     35 +
 tantivy/examples/iterating_docs_and_positions.rs   |    135 +
 tantivy/examples/json_field.rs                     |    105 +
 tantivy/examples/multiple_producer.rs              |    104 +
 tantivy/examples/pre_tokenized_text.rs             |    135 +
 tantivy/examples/snippet.rs                        |     85 +
 tantivy/examples/stop_words.rs                     |    113 +
 tantivy/examples/warmer.rs                         |    219 +
 tantivy/examples/working_with_json.rs              |     40 +
 tantivy/fastfield_codecs/Cargo.toml                |     34 +
 tantivy/fastfield_codecs/README.md                 |     68 +
 tantivy/fastfield_codecs/benches/bench.rs          |    246 +
 tantivy/fastfield_codecs/src/bitpacked.rs          |    116 +
 tantivy/fastfield_codecs/src/blockwise_linear.rs   |    186 +
 tantivy/fastfield_codecs/src/column.rs             |    352 +
 .../src/compact_space/blank_range.rs               |     43 +
 .../src/compact_space/build_compact_space.rs       |    231 +
 tantivy/fastfield_codecs/src/compact_space/mod.rs  |    821 +
 tantivy/fastfield_codecs/src/format_version.rs     |     39 +
 tantivy/fastfield_codecs/src/gcd.rs                |    170 +
 tantivy/fastfield_codecs/src/lib.rs                |    567 +
 tantivy/fastfield_codecs/src/line.rs               |    222 +
 tantivy/fastfield_codecs/src/linear.rs             |    231 +
 tantivy/fastfield_codecs/src/main.rs               |    222 +
 tantivy/fastfield_codecs/src/monotonic_mapping.rs  |    303 +
 .../fastfield_codecs/src/monotonic_mapping_u128.rs |     43 +
 tantivy/fastfield_codecs/src/null_index_footer.rs  |    144 +
 tantivy/fastfield_codecs/src/serialize.rs          |    355 +
 tantivy/ownedbytes/Cargo.toml                      |     15 +
 tantivy/ownedbytes/src/lib.rs                      |    358 +
 tantivy/query-grammar/Cargo.toml                   |     17 +
 tantivy/query-grammar/README.md                    |      3 +
 tantivy/query-grammar/src/lib.rs                   |     17 +
 tantivy/query-grammar/src/occur.rs                 |     72 +
 tantivy/query-grammar/src/query_grammar.rs         |    815 +
 tantivy/query-grammar/src/user_input_ast.rs        |    194 +
 tantivy/run-tests.sh                               |      2 +
 tantivy/rustfmt.toml                               |      7 +
 tantivy/src/aggregation/README.md                  |     36 +
 tantivy/src/aggregation/agg_req.rs                 |    369 +
 tantivy/src/aggregation/agg_req_with_accessor.rs   |    220 +
 tantivy/src/aggregation/agg_result.rs              |    254 +
 .../src/aggregation/bucket/histogram/histogram.rs  |   1524 +
 tantivy/src/aggregation/bucket/histogram/mod.rs    |      2 +
 tantivy/src/aggregation/bucket/mod.rs              |    140 +
 tantivy/src/aggregation/bucket/range.rs            |    874 +
 tantivy/src/aggregation/bucket/term_agg.rs         |   1419 +
 tantivy/src/aggregation/collector.rs               |    186 +
 tantivy/src/aggregation/date.rs                    |     18 +
 tantivy/src/aggregation/intermediate_agg_result.rs |    783 +
 tantivy/src/aggregation/metric/average.rs          |    114 +
 tantivy/src/aggregation/metric/mod.rs              |     30 +
 tantivy/src/aggregation/metric/stats.rs            |    371 +
 tantivy/src/aggregation/mod.rs                     |   1617 +
 tantivy/src/aggregation/segment_agg_result.rs      |    313 +
 tantivy/src/collector/count_collector.rs           |    110 +
 .../src/collector/custom_score_top_collector.rs    |    121 +
 tantivy/src/collector/docset_collector.rs          |     60 +
 tantivy/src/collector/facet_collector.rs           |    727 +
 tantivy/src/collector/filter_collector_wrapper.rs  |    189 +
 tantivy/src/collector/histogram_collector.rs       |    299 +
 tantivy/src/collector/mod.rs                       |    485 +
 tantivy/src/collector/multi_collector.rs           |    288 +
 tantivy/src/collector/tests.rs                     |    295 +
 tantivy/src/collector/top_collector.rs             |    384 +
 tantivy/src/collector/top_score_collector.rs       |   1104 +
 tantivy/src/collector/tweak_score_top_collector.rs |    124 +
 tantivy/src/core/executor.rs                       |    150 +
 tantivy/src/core/index.rs                          |    969 +
 tantivy/src/core/index_meta.rs                     |    547 +
 tantivy/src/core/inverted_index_reader.rs          |    253 +
 tantivy/src/core/mod.rs                            |     38 +
 tantivy/src/core/searcher.rs                       |    287 +
 tantivy/src/core/segment.rs                        |     90 +
 tantivy/src/core/segment_component.rs              |     47 +
 tantivy/src/core/segment_id.rs                     |    142 +
 tantivy/src/core/segment_reader.rs                 |    423 +
 tantivy/src/core/single_segment_index_writer.rs    |     51 +
 tantivy/src/directory/composite_file.rs            |    288 +
 tantivy/src/directory/directory.rs                 |    254 +
 tantivy/src/directory/directory_lock.rs            |     59 +
 tantivy/src/directory/error.rs                     |    202 +
 tantivy/src/directory/file_slice.rs                |    310 +
 tantivy/src/directory/file_watcher.rs              |    189 +
 tantivy/src/directory/footer.rs                    |    230 +
 tantivy/src/directory/managed_directory.rs         |    413 +
 tantivy/src/directory/mmap_directory.rs            |    649 +
 tantivy/src/directory/mod.rs                       |     58 +
 tantivy/src/directory/ram_directory.rs             |    292 +
 tantivy/src/directory/tests.rs                     |    276 +
 tantivy/src/directory/watch_event_router.rs        |    175 +
 tantivy/src/docset.rs                              |    171 +
 tantivy/src/error.rs                               |    196 +
 tantivy/src/fastfield/alive_bitset.rs              |    225 +
 tantivy/src/fastfield/bytes/mod.rs                 |    117 +
 tantivy/src/fastfield/bytes/reader.rs              |     58 +
 tantivy/src/fastfield/bytes/writer.rs              |    145 +
 tantivy/src/fastfield/error.rs                     |     26 +
 tantivy/src/fastfield/facet_reader.rs              |    179 +
 tantivy/src/fastfield/mod.rs                       |   1087 +
 tantivy/src/fastfield/multivalued/index.rs         |    148 +
 tantivy/src/fastfield/multivalued/mod.rs           |    619 +
 tantivy/src/fastfield/multivalued/reader.rs        |    286 +
 tantivy/src/fastfield/multivalued/writer.rs        |    442 +
 tantivy/src/fastfield/readers.rs                   |    317 +
 tantivy/src/fastfield/serializer/mod.rs            |    122 +
 tantivy/src/fastfield/writer.rs                    |    568 +
 tantivy/src/fieldnorm/code.rs                      |    329 +
 tantivy/src/fieldnorm/mod.rs                       |    154 +
 tantivy/src/fieldnorm/reader.rs                    |    194 +
 tantivy/src/fieldnorm/serializer.rs                |     34 +
 tantivy/src/fieldnorm/writer.rs                    |    117 +
 tantivy/src/functional_test.rs                     |    201 +
 tantivy/src/future_result.rs                       |    130 +
 tantivy/src/indexer/delete_queue.rs                |    306 +
 tantivy/src/indexer/demuxer.rs                     |    322 +
 tantivy/src/indexer/doc_id_mapping.rs              |    509 +
 tantivy/src/indexer/doc_opstamp_mapping.rs         |     62 +
 tantivy/src/indexer/flat_map_with_buffer.rs        |     69 +
 tantivy/src/indexer/index_writer.rs                |   2416 +
 tantivy/src/indexer/index_writer_status.rs         |    120 +
 tantivy/src/indexer/json_term_writer.rs            |    623 +
 tantivy/src/indexer/log_merge_policy.rs            |    376 +
 tantivy/src/indexer/merge_operation.rs             |     73 +
 tantivy/src/indexer/merge_policy.rs                |     64 +
 tantivy/src/indexer/merger.rs                      |   2039 +
 tantivy/src/indexer/merger_sorted_index_test.rs    |    573 +
 tantivy/src/indexer/mod.rs                         |    124 +
 tantivy/src/indexer/operation.rs                   |     25 +
 tantivy/src/indexer/prepared_commit.rs             |     52 +
 tantivy/src/indexer/segment_entry.rs               |     72 +
 tantivy/src/indexer/segment_manager.rs             |    222 +
 tantivy/src/indexer/segment_register.rs            |    147 +
 tantivy/src/indexer/segment_serializer.rs          |    110 +
 tantivy/src/indexer/segment_updater.rs             |   1095 +
 tantivy/src/indexer/segment_writer.rs              |    872 +
 tantivy/src/indexer/sorted_doc_id_column.rs        |    108 +
 .../src/indexer/sorted_doc_id_multivalue_column.rs |    170 +
 tantivy/src/indexer/stamper.rs                     |    132 +
 tantivy/src/lib.rs                                 |   1177 +
 tantivy/src/macros.rs                              |     97 +
 tantivy/src/positions/mod.rs                       |    237 +
 tantivy/src/positions/reader.rs                    |    149 +
 tantivy/src/positions/serializer.rs                |     92 +
 tantivy/src/postings/block_search.rs               |    100 +
 tantivy/src/postings/block_segment_postings.rs     |    525 +
 tantivy/src/postings/compression/mod.rs            |    384 +
 tantivy/src/postings/compression/vint.rs           |    108 +
 tantivy/src/postings/indexing_context.rs           |     27 +
 tantivy/src/postings/json_postings_writer.rs       |     97 +
 tantivy/src/postings/mod.rs                        |    760 +
 tantivy/src/postings/per_field_postings_writer.rs  |     73 +
 tantivy/src/postings/postings.rs                   |     26 +
 tantivy/src/postings/postings_writer.rs            |    271 +
 tantivy/src/postings/recorder.rs                   |    350 +
 tantivy/src/postings/segment_postings.rs           |    302 +
 tantivy/src/postings/serializer.rs                 |    475 +
 tantivy/src/postings/skip.rs                       |    405 +
 tantivy/src/postings/stacker/expull.rs             |    324 +
 tantivy/src/postings/stacker/memory_arena.rs       |    245 +
 tantivy/src/postings/stacker/mod.rs                |      7 +
 tantivy/src/postings/stacker/term_hashmap.rs       |    289 +
 tantivy/src/postings/term_info.rs                  |     79 +
 tantivy/src/query/all_query.rs                     |    135 +
 tantivy/src/query/automaton_weight.rs              |    171 +
 tantivy/src/query/bitset/mod.rs                    |    274 +
 tantivy/src/query/bm25.rs                          |    175 +
 tantivy/src/query/boolean_query/block_wand.rs      |    624 +
 tantivy/src/query/boolean_query/boolean_query.rs   |    322 +
 tantivy/src/query/boolean_query/boolean_weight.rs  |    274 +
 tantivy/src/query/boolean_query/mod.rs             |    318 +
 tantivy/src/query/boost_query.rs                   |    160 +
 tantivy/src/query/const_score_query.rs             |    176 +
 tantivy/src/query/disjunction_max_query.rs         |    131 +
 tantivy/src/query/empty_query.rs                   |     75 +
 tantivy/src/query/exclude.rs                       |    142 +
 tantivy/src/query/explanation.rs                   |     68 +
 tantivy/src/query/fuzzy_query.rs                   |    246 +
 tantivy/src/query/intersection.rs                  |    248 +
 tantivy/src/query/mod.rs                           |    114 +
 tantivy/src/query/more_like_this/mod.rs            |      7 +
 tantivy/src/query/more_like_this/more_like_this.rs |    379 +
 tantivy/src/query/more_like_this/query.rs          |    283 +
 tantivy/src/query/phrase_query/mod.rs              |    390 +
 tantivy/src/query/phrase_query/phrase_query.rs     |    136 +
 tantivy/src/query/phrase_query/phrase_scorer.rs    |    486 +
 tantivy/src/query/phrase_query/phrase_weight.rs    |    148 +
 tantivy/src/query/query.rs                         |    144 +
 tantivy/src/query/query_parser/logical_ast.rs      |    119 +
 tantivy/src/query/query_parser/mod.rs              |      4 +
 tantivy/src/query/query_parser/query_parser.rs     |   1571 +
 tantivy/src/query/range_query.rs                   |    688 +
 tantivy/src/query/range_query_ip_fastfield.rs      |    728 +
 tantivy/src/query/regex_query.rs                   |    179 +
 tantivy/src/query/reqopt_scorer.rs                 |    199 +
 tantivy/src/query/score_combiner.rs                |    116 +
 tantivy/src/query/scorer.rs                        |     24 +
 tantivy/src/query/set_query.rs                     |    244 +
 tantivy/src/query/term_query/mod.rs                |    216 +
 tantivy/src/query/term_query/term_query.rs         |    195 +
 tantivy/src/query/term_query/term_scorer.rs        |    332 +
 tantivy/src/query/term_query/term_weight.rs        |    143 +
 tantivy/src/query/union.rs                         |    443 +
 tantivy/src/query/vec_docset.rs                    |     88 +
 tantivy/src/query/weight.rs                        |    123 +
 tantivy/src/reader/mod.rs                          |    302 +
 tantivy/src/reader/warming.rs                      |    340 +
 tantivy/src/schema/bytes_options.rs                |    294 +
 tantivy/src/schema/date_time_options.rs            |    276 +
 tantivy/src/schema/document.rs                     |    282 +
 tantivy/src/schema/facet.rs                        |    354 +
 tantivy/src/schema/facet_options.rs                |     81 +
 tantivy/src/schema/field.rs                        |     33 +
 tantivy/src/schema/field_entry.rs                  |    210 +
 tantivy/src/schema/field_type.rs                   |    596 +
 tantivy/src/schema/field_value.rs                  |     49 +
 tantivy/src/schema/flags.rs                        |     91 +
 tantivy/src/schema/index_record_option.rs          |     52 +
 tantivy/src/schema/ip_options.rs                   |    168 +
 tantivy/src/schema/json_object_options.rs          |    151 +
 tantivy/src/schema/mod.rs                          |    172 +
 tantivy/src/schema/named_field_document.rs         |     13 +
 tantivy/src/schema/numeric_options.rs              |    286 +
 tantivy/src/schema/schema.rs                       |   1035 +
 tantivy/src/schema/term.rs                         |    518 +
 tantivy/src/schema/text_options.rs                 |    290 +
 tantivy/src/schema/value.rs                        |    552 +
 tantivy/src/snippet/mod.rs                         |    675 +
 tantivy/src/space_usage/mod.rs                     |    480 +
 tantivy/src/store/compression_brotli.rs            |     19 +
 tantivy/src/store/compression_lz4_block.rs         |     50 +
 tantivy/src/store/compression_snap.rs              |     17 +
 tantivy/src/store/compression_zstd_block.rs        |     54 +
 tantivy/src/store/compressors.rs                   |    259 +
 tantivy/src/store/decompressors.rs                 |    140 +
 tantivy/src/store/footer.rs                        |     81 +
 tantivy/src/store/index/block.rs                   |    174 +
 tantivy/src/store/index/mod.rs                     |    248 +
 tantivy/src/store/index/skip_index.rs              |    107 +
 tantivy/src/store/index/skip_index_builder.rs      |    117 +
 tantivy/src/store/mod.rs                           |    409 +
 tantivy/src/store/reader.rs                        |    421 +
 tantivy/src/store/store_compressor.rs              |    269 +
 tantivy/src/store/writer.rs                        |    138 +
 tantivy/src/termdict/fst_termdict/merger.rs        |    155 +
 tantivy/src/termdict/fst_termdict/mod.rs           |     28 +
 tantivy/src/termdict/fst_termdict/streamer.rs      |    147 +
 .../src/termdict/fst_termdict/term_info_store.rs   |    368 +
 tantivy/src/termdict/fst_termdict/termdict.rs      |    206 +
 tantivy/src/termdict/mod.rs                        |     38 +
 tantivy/src/termdict/sstable_termdict/merger.rs    |    120 +
 tantivy/src/termdict/sstable_termdict/mod.rs       |    144 +
 .../sstable_termdict/sstable/block_reader.rs       |     81 +
 .../src/termdict/sstable_termdict/sstable/delta.rs |    182 +
 .../sstable_termdict/sstable/merge/heap_merge.rs   |     72 +
 .../termdict/sstable_termdict/sstable/merge/mod.rs |    178 +
 .../src/termdict/sstable_termdict/sstable/mod.rs   |    359 +
 .../sstable_termdict/sstable/sstable_index.rs      |    164 +
 .../src/termdict/sstable_termdict/sstable/value.rs |     95 +
 .../src/termdict/sstable_termdict/sstable/vint.rs  |     67 +
 tantivy/src/termdict/sstable_termdict/streamer.rs  |    251 +
 tantivy/src/termdict/sstable_termdict/termdict.rs  |    258 +
 tantivy/src/termdict/tests.rs                      |    431 +
 tantivy/src/tokenizer/alphanum_only.rs             |     91 +
 tantivy/src/tokenizer/ascii_folding_filter.rs      |   4047 +
 tantivy/src/tokenizer/empty_tokenizer.rs           |     41 +
 tantivy/src/tokenizer/facet_tokenizer.rs           |    124 +
 tantivy/src/tokenizer/lower_caser.rs               |     86 +
 tantivy/src/tokenizer/mod.rs                       |    302 +
 tantivy/src/tokenizer/ngram_tokenizer.rs           |    456 +
 tantivy/src/tokenizer/raw_tokenizer.rs             |     68 +
 tantivy/src/tokenizer/remove_long.rs               |     96 +
 tantivy/src/tokenizer/simple_tokenizer.rs          |     86 +
 tantivy/src/tokenizer/split_compound_words.rs      |    252 +
 tantivy/src/tokenizer/stemmer.rs                   |    126 +
 .../tokenizer/stop_word_filter/gen_stopwords.py    |     42 +
 tantivy/src/tokenizer/stop_word_filter/mod.rs      |    141 +
 .../src/tokenizer/stop_word_filter/stopwords.rs    |   2117 +
 tantivy/src/tokenizer/tokenized_string.rs          |    102 +
 tantivy/src/tokenizer/tokenizer.rs                 |    311 +
 tantivy/src/tokenizer/tokenizer_manager.rs         |     78 +
 tantivy/src/tokenizer/whitespace_tokenizer.rs      |     86 +
 tantivy/tests/failpoints/mod.rs                    |    124 +
 tantivy/tests/mod.rs                               |      1 +
 569 files changed, 235427 insertions(+)

diff --git a/README.md b/README.md
index c1ceb7d..5c95da5 100644
--- a/README.md
+++ b/README.md
@@ -11,8 +11,10 @@ Below list the crates:
 - [mio](https://github.com/tokio-rs/mio/tree/7ed74bf478230a0cfa7543901f6be6df8bb3602e)
 - [num_cpus](https://github.com/seanmonstar/num_cpus/tree/e437b9d9083d717692e35d917de8674a7987dd06)
 - [rand](https://github.com/rust-random/rand/tree/3543f4b0258ecec04be570bbe9dc6e50d80bd3c1)
+- [rayon](https://github.com/rayon-rs/rayon/tree/3883630e0bcdcfd152fad36352893662a5bb380e)
 - [ring](https://github.com/briansmith/ring/tree/9cc0d45f4d8521f467bb3a621e74b1535e118188)
 - [rustface](https://github.com/atomashpolskiy/rustface/tree/93c97ed7d0fa1cc3553f5483d865292cc37ceb98)
 - [rustls](https://github.com/rustls/rustls/tree/92600efb4f6cc25bfe0c133b0b922d915ed826e3)
 - [rustls-0.19.1](https://github.com/rustls/rustls/tree/3c390ef7c459cc1ef2504bd9d1fefdcb7eea1c20)
 - [rusty-machine](https://github.com/AtheMathmo/rusty-machine/tree/e7cc57fc5e0f384aeb19169336deb5f66655c76a)
+- [tantivy](https://github.com/quickwit-oss/tantivy/tree/6761237ec71b4e25ee4b5661e794b4755c6c5e56)
diff --git a/rayon/.github/workflows/ci.yaml b/rayon/.github/workflows/ci.yaml
new file mode 100644
index 0000000..9db0d88
--- /dev/null
+++ b/rayon/.github/workflows/ci.yaml
@@ -0,0 +1,100 @@
+name: CI
+on:
+  push:
+    branches:
+      - staging
+      - trying
+
+env:
+  CARGO_REGISTRIES_CRATES_IO_PROTOCOL: sparse
+
+jobs:
+
+  check:
+    name: Check (1.59.0)
+    runs-on: ubuntu-latest
+    env:
+      CARGO_REGISTRIES_CRATES_IO_PROTOCOL: git
+    steps:
+      - uses: actions/checkout@v3
+      - uses: dtolnay/rust-toolchain@1.59.0
+      - run: cp ci/compat-Cargo.lock ./Cargo.lock
+      - run: cargo check --verbose --locked
+
+  test:
+    name: Test
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        os: [ubuntu-latest, windows-latest, macos-latest]
+        rust: [stable, beta, nightly]
+    steps:
+      - uses: actions/checkout@v3
+      - uses: dtolnay/rust-toolchain@master
+        with:
+          toolchain: ${{ matrix.rust }}
+      - run: cargo build --verbose
+      - run: cargo test --verbose --package rayon
+      - run: cargo test --verbose --package rayon-core
+      - run: ./ci/highlander.sh
+
+  # rayon-demo has huge dependencies, so limit its testing.
+  # build on stable, test on nightly (because of #[bench])
+  demo:
+    name: Demo
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        rust: [stable, nightly]
+    steps:
+      - uses: actions/checkout@v3
+      - uses: dtolnay/rust-toolchain@master
+        with:
+          toolchain: ${{ matrix.rust }}
+      - run: cargo build --verbose --package rayon-demo
+      - run: cargo test --verbose --package rayon-demo
+        if: matrix.rust == 'nightly'
+
+  i686:
+    name: Test (ubuntu-latest, stable-i686)
+    runs-on: ubuntu-latest
+    steps:
+      - run: |
+          sudo apt-get update
+          sudo apt-get install gcc-multilib
+      - uses: actions/checkout@v3
+      - uses: dtolnay/rust-toolchain@master
+        with:
+          toolchain: stable-i686-unknown-linux-gnu
+      - run: cargo build --verbose
+      - run: cargo test --verbose --package rayon
+      - run: cargo test --verbose --package rayon-core
+
+  # wasm32-unknown-unknown builds, and even has the runtime fallback for
+  # unsupported threading, but we don't have an environment to execute in.
+  # wasm32-wasi can test the fallback by running in wasmtime.
+  wasm:
+    name: WebAssembly
+    runs-on: ubuntu-latest
+    env:
+      CARGO_TARGET_WASM32_WASI_RUNNER: /home/runner/.wasmtime/bin/wasmtime
+    steps:
+      - uses: actions/checkout@v3
+      - uses: dtolnay/rust-toolchain@stable
+        with:
+          targets: wasm32-unknown-unknown,wasm32-wasi
+      - run: cargo check --verbose --target wasm32-unknown-unknown
+      - run: cargo check --verbose --target wasm32-wasi
+      - run: curl https://wasmtime.dev/install.sh -sSf | bash
+      - run: cargo test --verbose --target wasm32-wasi --package rayon
+      - run: cargo test --verbose --target wasm32-wasi --package rayon-core
+
+  fmt:
+    name: Format
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - uses: dtolnay/rust-toolchain@1.67.1
+        with:
+          components: rustfmt
+      - run: cargo fmt --all --check
diff --git a/rayon/.github/workflows/master.yaml b/rayon/.github/workflows/master.yaml
new file mode 100644
index 0000000..6ab6a45
--- /dev/null
+++ b/rayon/.github/workflows/master.yaml
@@ -0,0 +1,27 @@
+name: master
+on:
+  push:
+    branches:
+      - master
+  schedule:
+    - cron: '0 0 * * 0' # 00:00 Sunday
+
+env:
+  CARGO_REGISTRIES_CRATES_IO_PROTOCOL: sparse
+
+jobs:
+
+  test:
+    name: Test (stable)
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - uses: dtolnay/rust-toolchain@stable
+        with:
+          toolchain: stable
+          profile: minimal
+          override: true
+      - run: cargo build --verbose
+      - run: cargo test --verbose --package rayon
+      - run: cargo test --verbose --package rayon-core
+      - run: ./ci/highlander.sh
diff --git a/rayon/.github/workflows/pr.yaml b/rayon/.github/workflows/pr.yaml
new file mode 100644
index 0000000..8857620
--- /dev/null
+++ b/rayon/.github/workflows/pr.yaml
@@ -0,0 +1,41 @@
+name: PR
+on: pull_request
+
+# Using 16MB stacks for deep test/debug recursion
+env:
+  CARGO_REGISTRIES_CRATES_IO_PROTOCOL: sparse
+  RUST_MIN_STACK: 16777216
+
+jobs:
+
+  check:
+    name: Check (1.59.0)
+    runs-on: ubuntu-latest
+    env:
+      CARGO_REGISTRIES_CRATES_IO_PROTOCOL: git
+    steps:
+      - uses: actions/checkout@v3
+      - uses: dtolnay/rust-toolchain@1.59.0
+      - run: cp ci/compat-Cargo.lock ./Cargo.lock
+      - run: cargo check --verbose --locked
+
+  test:
+    name: Test (stable)
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - uses: dtolnay/rust-toolchain@stable
+      - run: cargo build --verbose
+      - run: cargo test --verbose --package rayon
+      - run: cargo test --verbose --package rayon-core
+      - run: ./ci/highlander.sh
+
+  fmt:
+    name: Format
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - uses: dtolnay/rust-toolchain@1.67.1
+        with:
+          components: rustfmt
+      - run: cargo fmt --all --check
diff --git a/rayon/.gitignore b/rayon/.gitignore
new file mode 100644
index 0000000..274bf18
--- /dev/null
+++ b/rayon/.gitignore
@@ -0,0 +1,6 @@
+Cargo.lock
+target
+*~
+TAGS
+*.bk
+.idea
\ No newline at end of file
diff --git a/rayon/Cargo.toml b/rayon/Cargo.toml
new file mode 100644
index 0000000..a6ccc97
--- /dev/null
+++ b/rayon/Cargo.toml
@@ -0,0 +1,31 @@
+[package]
+name = "rayon"
+version = "1.7.0"
+authors = ["Niko Matsakis <ni...@alum.mit.edu>",
+           "Josh Stone <cu...@gmail.com>"]
+description = "Simple work-stealing parallelism for Rust"
+rust-version = "1.59"
+edition = "2021"
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/rayon-rs/rayon"
+documentation = "https://docs.rs/rayon/"
+readme = "README.md"
+keywords = ["parallel", "thread", "concurrency", "join", "performance"]
+categories = ["concurrency"]
+exclude = ["/ci/*", "/scripts/*", "/.github/*", "/bors.toml"]
+
+[workspace]
+members = ["rayon-demo", "rayon-core"]
+exclude = ["ci"]
+
+[dependencies]
+rayon-core = { version = "1.11.0", path = "rayon-core" }
+
+# This is a public dependency!
+[dependencies.either]
+version = "1.0"
+default-features = false
+
+[dev-dependencies]
+rand = "0.8"
+rand_xorshift = "0.3"
diff --git a/rayon/FAQ.md b/rayon/FAQ.md
new file mode 100644
index 0000000..745f033
--- /dev/null
+++ b/rayon/FAQ.md
@@ -0,0 +1,227 @@
+# Rayon FAQ
+
+This file is for general questions that don't fit into the README or
+crate docs.
+
+## How many threads will Rayon spawn?
+
+By default, Rayon uses the same number of threads as the number of
+CPUs available. Note that on systems with hyperthreading enabled this
+equals the number of logical cores and not the physical ones.
+
+If you want to alter the number of threads spawned, you can set the
+environmental variable `RAYON_NUM_THREADS` to the desired number of
+threads or use the
+[`ThreadPoolBuilder::build_global`](https://docs.rs/rayon/*/rayon/struct.ThreadPoolBuilder.html#method.build_global)
+method.
+
+## How does Rayon balance work between threads?
+
+Behind the scenes, Rayon uses a technique called **work stealing** to
+try and dynamically ascertain how much parallelism is available and
+exploit it. The idea is very simple: we always have a pool of worker
+threads available, waiting for some work to do. When you call `join`
+the first time, we shift over into that pool of threads. But if you
+call `join(a, b)` from a worker thread W, then W will place `b` into
+its work queue, advertising that this is work that other worker
+threads might help out with. W will then start executing `a`.
+
+While W is busy with `a`, other threads might come along and take `b`
+from its queue. That is called *stealing* `b`. Once `a` is done, W
+checks whether `b` was stolen by another thread and, if not, executes
+`b` itself. If W runs out of jobs in its own queue, it will look
+through the other threads' queues and try to steal work from them.
+
+This technique is not new. It was first introduced by the
+[Cilk project][cilk], done at MIT in the late nineties. The name Rayon
+is an homage to that work.
+
+[cilk]: http://supertech.csail.mit.edu/cilk/
+
+## What should I do if I use `Rc`, `Cell`, `RefCell` or other non-Send-and-Sync types?
+
+There are a number of non-threadsafe types in the Rust standard library,
+and if your code is using them, you will not be able to combine it
+with Rayon. Similarly, even if you don't have such types, but you try
+to have multiple closures mutating the same state, you will get
+compilation errors; for example, this function won't work, because
+both closures access `slice`:
+
+```rust
+/// Increment all values in slice.
+fn increment_all(slice: &mut [i32]) {
+    rayon::join(|| process(slice), || process(slice));
+}
+```
+
+The correct way to resolve such errors will depend on the case.  Some
+cases are easy: for example, uses of [`Rc`] can typically be replaced
+with [`Arc`], which is basically equivalent, but thread-safe.
+
+Code that uses `Cell` or `RefCell`, however, can be somewhat more complicated.
+If you can refactor your code to avoid those types, that is often the best way
+forward, but otherwise, you can try to replace those types with their threadsafe
+equivalents:
+
+- `Cell` -- replacement: `AtomicUsize`, `AtomicBool`, etc
+- `RefCell` -- replacement: `RwLock`, or perhaps `Mutex`
+
+However, you have to be wary! The parallel versions of these types
+have different atomicity guarantees. For example, with a `Cell`, you
+can increment a counter like so:
+
+```rust
+let value = counter.get();
+counter.set(value + 1);
+```
+
+But when you use the equivalent `AtomicUsize` methods, you are
+actually introducing a potential race condition (not a data race,
+technically, but it can be an awfully fine distinction):
+
+```rust
+let value = tscounter.load(Ordering::SeqCst);
+tscounter.store(value + 1, Ordering::SeqCst);
+```
+
+You can already see that the `AtomicUsize` API is a bit more complex,
+as it requires you to specify an
+[ordering](https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html). (I
+won't go into the details on ordering here, but suffice to say that if
+you don't know what an ordering is, and probably even if you do, you
+should use `Ordering::SeqCst`.) The danger in this parallel version of
+the counter is that other threads might be running at the same time
+and they could cause our counter to get out of sync. For example, if
+we have two threads, then they might both execute the "load" before
+either has a chance to execute the "store":
+
+```
+Thread 1                                          Thread 2
+let value = tscounter.load(Ordering::SeqCst);
+// value = X                                      let value = tscounter.load(Ordering::SeqCst);
+                                                  // value = X
+tscounter.store(value + 1, Ordering::SeqCst);     tscounter.store(value + 1, Ordering::SeqCst);
+// tscounter = X+1                                // tscounter = X+1
+```
+
+Now even though we've had two increments, we'll only increase the
+counter by one!  Even though we've got no data race, this is still
+probably not the result we wanted. The problem here is that the `Cell`
+API doesn't make clear the scope of a "transaction" -- that is, the
+set of reads/writes that should occur atomically. In this case, we
+probably wanted the get/set to occur together.
+
+In fact, when using the `Atomic` types, you very rarely want a plain
+`load` or plain `store`. You probably want the more complex
+operations. A counter, for example, would use `fetch_add` to
+atomically load and increment the value in one step. Compare-and-swap
+is another popular building block.
+
+A similar problem can arise when converting `RefCell` to `RwLock`, but
+it is somewhat less likely, because the `RefCell` API does in fact
+have a notion of a transaction: the scope of the handle returned by
+`borrow` or `borrow_mut`. So if you convert each call to `borrow` to
+`read` (and `borrow_mut` to `write`), things will mostly work fine in
+a parallel setting, but there can still be changes in behavior.
+Consider using a `handle: RefCell<Vec<i32>>` like:
+
+```rust
+let len = handle.borrow().len();
+for i in 0 .. len {
+    let data = handle.borrow()[i];
+    println!("{}", data);
+}
+```
+
+In sequential code, we know that this loop is safe. But if we convert
+this to parallel code with an `RwLock`, we do not: this is because
+another thread could come along and do
+`handle.write().unwrap().pop()`, and thus change the length of the
+vector. In fact, even in *sequential* code, using very small borrow
+sections like this is an anti-pattern: you ought to be enclosing the
+entire transaction together, like so:
+
+```rust
+let vec = handle.borrow();
+let len = vec.len();
+for i in 0 .. len {
+    let data = vec[i];
+    println!("{}", data);
+}
+```
+
+Or, even better, using an iterator instead of indexing:
+
+```rust
+let vec = handle.borrow();
+for data in vec {
+    println!("{}", data);
+}
+```
+
+There are several reasons to prefer one borrow over many. The most
+obvious is that it is more efficient, since each borrow has to perform
+some safety checks. But it's also more reliable: suppose we modified
+the loop above to not just print things out, but also call into a
+helper function:
+
+```rust
+let vec = handle.borrow();
+for data in vec {
+    helper(...);
+}
+```
+
+And now suppose, independently, this helper fn evolved and had to pop
+something off of the vector:
+
+```rust
+fn helper(...) {
+    handle.borrow_mut().pop();
+}
+```
+
+Under the old model, where we did lots of small borrows, this would
+yield precisely the same error that we saw in parallel land using an
+`RwLock`: the length would be out of sync and our indexing would fail
+(note that in neither case would there be an actual *data race* and
+hence there would never be undefined behavior). But now that we use a
+single borrow, we'll see a borrow error instead, which is much easier
+to diagnose, since it occurs at the point of the `borrow_mut`, rather
+than downstream. Similarly, if we move to an `RwLock`, we'll find that
+the code either deadlocks (if the write is on the same thread as the
+read) or, if the write is on another thread, works just fine. Both of
+these are preferable to random failures in my experience.
+
+## But wait, isn't Rust supposed to free me from this kind of thinking?
+
+You might think that Rust is supposed to mean that you don't have to
+think about atomicity at all. In fact, if you avoid interior
+mutability (`Cell` and `RefCell` in a sequential setting, or
+`AtomicUsize`, `RwLock`, `Mutex`, et al. in parallel code), then this
+is true: the type system will basically guarantee that you don't have
+to think about atomicity at all. But often there are times when you
+WANT threads to interleave in the ways I showed above.
+
+Consider for example when you are conducting a search in parallel, say
+to find the shortest route. To avoid fruitless search, you might want
+to keep a cell with the shortest route you've found thus far.  This
+way, when you are searching down some path that's already longer than
+this shortest route, you can just stop and avoid wasted effort. In
+sequential land, you might model this "best result" as a shared value
+like `Rc<Cell<usize>>` (here the `usize` represents the length of best
+path found so far); in parallel land, you'd use an `Arc<AtomicUsize>`.
+Now we can make our search function look like:
+
+```rust
+fn search(path: &Path, cost_so_far: usize, best_cost: &Arc<AtomicUsize>) {
+    if cost_so_far >= best_cost.load(Ordering::SeqCst) {
+        return;
+    }
+    ...
+    best_cost.store(...);
+}
+```
+
+Now in this case, we really WANT to see results from other threads
+interjected into our execution!
diff --git a/rayon/LICENSE-APACHE b/rayon/LICENSE-APACHE
new file mode 100644
index 0000000..16fe87b
--- /dev/null
+++ b/rayon/LICENSE-APACHE
@@ -0,0 +1,201 @@
+                              Apache License
+                        Version 2.0, January 2004
+                     http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+   "License" shall mean the terms and conditions for use, reproduction,
+   and distribution as defined by Sections 1 through 9 of this document.
+
+   "Licensor" shall mean the copyright owner or entity authorized by
+   the copyright owner that is granting the License.
+
+   "Legal Entity" shall mean the union of the acting entity and all
+   other entities that control, are controlled by, or are under common
+   control with that entity. For the purposes of this definition,
+   "control" means (i) the power, direct or indirect, to cause the
+   direction or management of such entity, whether by contract or
+   otherwise, or (ii) ownership of fifty percent (50%) or more of the
+   outstanding shares, or (iii) beneficial ownership of such entity.
+
+   "You" (or "Your") shall mean an individual or Legal Entity
+   exercising permissions granted by this License.
+
+   "Source" form shall mean the preferred form for making modifications,
+   including but not limited to software source code, documentation
+   source, and configuration files.
+
+   "Object" form shall mean any form resulting from mechanical
+   transformation or translation of a Source form, including but
+   not limited to compiled object code, generated documentation,
+   and conversions to other media types.
+
+   "Work" shall mean the work of authorship, whether in Source or
+   Object form, made available under the License, as indicated by a
+   copyright notice that is included in or attached to the work
+   (an example is provided in the Appendix below).
+
+   "Derivative Works" shall mean any work, whether in Source or Object
+   form, that is based on (or derived from) the Work and for which the
+   editorial revisions, annotations, elaborations, or other modifications
+   represent, as a whole, an original work of authorship. For the purposes
+   of this License, Derivative Works shall not include works that remain
+   separable from, or merely link (or bind by name) to the interfaces of,
+   the Work and Derivative Works thereof.
+
+   "Contribution" shall mean any work of authorship, including
+   the original version of the Work and any modifications or additions
+   to that Work or Derivative Works thereof, that is intentionally
+   submitted to Licensor for inclusion in the Work by the copyright owner
+   or by an individual or Legal Entity authorized to submit on behalf of
+   the copyright owner. For the purposes of this definition, "submitted"
+   means any form of electronic, verbal, or written communication sent
+   to the Licensor or its representatives, including but not limited to
+   communication on electronic mailing lists, source code control systems,
+   and issue tracking systems that are managed by, or on behalf of, the
+   Licensor for the purpose of discussing and improving the Work, but
+   excluding communication that is conspicuously marked or otherwise
+   designated in writing by the copyright owner as "Not a Contribution."
+
+   "Contributor" shall mean Licensor and any individual or Legal Entity
+   on behalf of whom a Contribution has been received by Licensor and
+   subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   copyright license to reproduce, prepare Derivative Works of,
+   publicly display, publicly perform, sublicense, and distribute the
+   Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   (except as stated in this section) patent license to make, have made,
+   use, offer to sell, sell, import, and otherwise transfer the Work,
+   where such license applies only to those patent claims licensable
+   by such Contributor that are necessarily infringed by their
+   Contribution(s) alone or by combination of their Contribution(s)
+   with the Work to which such Contribution(s) was submitted. If You
+   institute patent litigation against any entity (including a
+   cross-claim or counterclaim in a lawsuit) alleging that the Work
+   or a Contribution incorporated within the Work constitutes direct
+   or contributory patent infringement, then any patent licenses
+   granted to You under this License for that Work shall terminate
+   as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+   Work or Derivative Works thereof in any medium, with or without
+   modifications, and in Source or Object form, provided that You
+   meet the following conditions:
+
+   (a) You must give any other recipients of the Work or
+       Derivative Works a copy of this License; and
+
+   (b) You must cause any modified files to carry prominent notices
+       stating that You changed the files; and
+
+   (c) You must retain, in the Source form of any Derivative Works
+       that You distribute, all copyright, patent, trademark, and
+       attribution notices from the Source form of the Work,
+       excluding those notices that do not pertain to any part of
+       the Derivative Works; and
+
+   (d) If the Work includes a "NOTICE" text file as part of its
+       distribution, then any Derivative Works that You distribute must
+       include a readable copy of the attribution notices contained
+       within such NOTICE file, excluding those notices that do not
+       pertain to any part of the Derivative Works, in at least one
+       of the following places: within a NOTICE text file distributed
+       as part of the Derivative Works; within the Source form or
+       documentation, if provided along with the Derivative Works; or,
+       within a display generated by the Derivative Works, if and
+       wherever such third-party notices normally appear. The contents
+       of the NOTICE file are for informational purposes only and
+       do not modify the License. You may add Your own attribution
+       notices within Derivative Works that You distribute, alongside
+       or as an addendum to the NOTICE text from the Work, provided
+       that such additional attribution notices cannot be construed
+       as modifying the License.
+
+   You may add Your own copyright statement to Your modifications and
+   may provide additional or different license terms and conditions
+   for use, reproduction, or distribution of Your modifications, or
+   for any such Derivative Works as a whole, provided Your use,
+   reproduction, and distribution of the Work otherwise complies with
+   the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+   any Contribution intentionally submitted for inclusion in the Work
+   by You to the Licensor shall be under the terms and conditions of
+   this License, without any additional terms or conditions.
+   Notwithstanding the above, nothing herein shall supersede or modify
+   the terms of any separate license agreement you may have executed
+   with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+   names, trademarks, service marks, or product names of the Licensor,
+   except as required for reasonable and customary use in describing the
+   origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+   agreed to in writing, Licensor provides the Work (and each
+   Contributor provides its Contributions) on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+   implied, including, without limitation, any warranties or conditions
+   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+   PARTICULAR PURPOSE. You are solely responsible for determining the
+   appropriateness of using or redistributing the Work and assume any
+   risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+   whether in tort (including negligence), contract, or otherwise,
+   unless required by applicable law (such as deliberate and grossly
+   negligent acts) or agreed to in writing, shall any Contributor be
+   liable to You for damages, including any direct, indirect, special,
+   incidental, or consequential damages of any character arising as a
+   result of this License or out of the use or inability to use the
+   Work (including but not limited to damages for loss of goodwill,
+   work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses), even if such Contributor
+   has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+   the Work or Derivative Works thereof, You may choose to offer,
+   and charge a fee for, acceptance of support, warranty, indemnity,
+   or other liability obligations and/or rights consistent with this
+   License. However, in accepting such obligations, You may act only
+   on Your own behalf and on Your sole responsibility, not on behalf
+   of any other Contributor, and only if You agree to indemnify,
+   defend, and hold each Contributor harmless for any liability
+   incurred by, or claims asserted against, such Contributor by reason
+   of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+   To apply the Apache License to your work, attach the following
+   boilerplate notice, with the fields enclosed by brackets "[]"
+   replaced with your own identifying information. (Don't include
+   the brackets!)  The text should be enclosed in the appropriate
+   comment syntax for the file format. We also recommend that a
+   file or class name and description of purpose be included on the
+   same "printed page" as the copyright notice for easier
+   identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/rayon/LICENSE-MIT b/rayon/LICENSE-MIT
new file mode 100644
index 0000000..25597d5
--- /dev/null
+++ b/rayon/LICENSE-MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2010 The Rust Project Developers
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/rayon/README.md b/rayon/README.md
new file mode 100644
index 0000000..7f925bc
--- /dev/null
+++ b/rayon/README.md
@@ -0,0 +1,144 @@
+# Rayon
+
+[![Rayon crate](https://img.shields.io/crates/v/rayon.svg)](https://crates.io/crates/rayon)
+[![Rayon documentation](https://docs.rs/rayon/badge.svg)](https://docs.rs/rayon)
+![minimum rustc 1.59](https://img.shields.io/badge/rustc-1.59+-red.svg)
+[![build status](https://github.com/rayon-rs/rayon/workflows/master/badge.svg)](https://github.com/rayon-rs/rayon/actions)
+[![Join the chat at https://gitter.im/rayon-rs/Lobby](https://badges.gitter.im/rayon-rs/Lobby.svg)](https://gitter.im/rayon-rs/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+
+Rayon is a data-parallelism library for Rust. It is extremely
+lightweight and makes it easy to convert a sequential computation into
+a parallel one. It also guarantees data-race freedom. (You may also
+enjoy [this blog post][blog] about Rayon, which gives more background
+and details about how it works, or [this video][video], from the Rust
+Belt Rust conference.) Rayon is
+[available on crates.io](https://crates.io/crates/rayon), and
+[API documentation is available on docs.rs](https://docs.rs/rayon).
+
+[blog]: https://smallcultfollowing.com/babysteps/blog/2015/12/18/rayon-data-parallelism-in-rust/
+[video]: https://www.youtube.com/watch?v=gof_OEv71Aw
+
+## Parallel iterators and more
+
+Rayon makes it drop-dead simple to convert sequential iterators into
+parallel ones: usually, you just change your `foo.iter()` call into
+`foo.par_iter()`, and Rayon does the rest:
+
+```rust
+use rayon::prelude::*;
+fn sum_of_squares(input: &[i32]) -> i32 {
+    input.par_iter() // <-- just change that!
+         .map(|&i| i * i)
+         .sum()
+}
+```
+
+[Parallel iterators] take care of deciding how to divide your data
+into tasks; it will dynamically adapt for maximum performance. If you
+need more flexibility than that, Rayon also offers the [join] and
+[scope] functions, which let you create parallel tasks on your own.
+For even more control, you can create [custom threadpools] rather than
+using Rayon's default, global threadpool.
+
+[Parallel iterators]: https://docs.rs/rayon/*/rayon/iter/index.html
+[join]: https://docs.rs/rayon/*/rayon/fn.join.html
+[scope]: https://docs.rs/rayon/*/rayon/fn.scope.html
+[custom threadpools]: https://docs.rs/rayon/*/rayon/struct.ThreadPool.html
+
+## No data races
+
+You may have heard that parallel execution can produce all kinds of
+crazy bugs. Well, rest easy. Rayon's APIs all guarantee **data-race
+freedom**, which generally rules out most parallel bugs (though not
+all). In other words, **if your code compiles**, it typically does the
+same thing it did before.
+
+For the most part, parallel iterators in particular are guaranteed to
+produce the same results as their sequential counterparts. One caveat:
+If your iterator has side effects (for example, sending messages to
+other threads through a [Rust channel] or writing to disk), those side
+effects may occur in a different order. Note also that, in some cases,
+parallel iterators offer alternative versions of the sequential
+iterator methods that can have higher performance.
+
+[Rust channel]: https://doc.rust-lang.org/std/sync/mpsc/fn.channel.html
+
+## Using Rayon
+
+[Rayon is available on crates.io](https://crates.io/crates/rayon). The
+recommended way to use it is to add a line into your Cargo.toml such
+as:
+
+```toml
+[dependencies]
+rayon = "1.7"
+```
+
+To use the parallel iterator APIs, a number of traits have to be in
+scope. The easiest way to bring those things into scope is to use the
+[Rayon prelude](https://docs.rs/rayon/*/rayon/prelude/index.html). In
+each module where you would like to use the parallel iterator APIs,
+just add:
+
+```rust
+use rayon::prelude::*;
+```
+
+Rayon currently requires `rustc 1.59.0` or greater.
+
+### Usage with WebAssembly
+
+Rayon can work on the Web via WebAssembly, but requires an adapter and
+some project configuration to account for differences between
+WebAssembly threads and threads on the other platforms.
+
+Check out the
+[wasm-bindgen-rayon](https://github.com/GoogleChromeLabs/wasm-bindgen-rayon)
+docs for more details.
+
+## Contribution
+
+Rayon is an open source project! If you'd like to contribute to Rayon,
+check out
+[the list of "help wanted" issues](https://github.com/rayon-rs/rayon/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22).
+These are all (or should be) issues that are suitable for getting
+started, and they generally include a detailed set of instructions for
+what to do. Please ask questions if anything is unclear! Also, check
+out the
+[Guide to Development](https://github.com/rayon-rs/rayon/wiki/Guide-to-Development)
+page on the wiki. Note that all code submitted in PRs to Rayon is
+assumed to
+[be licensed under Rayon's dual MIT/Apache 2.0 licensing](https://github.com/rayon-rs/rayon/blob/master/README.md#license).
+
+## Quick demo
+
+To see Rayon in action, check out the `rayon-demo` directory, which
+includes a number of demos of code using Rayon. For example, run this
+command to get a visualization of an N-body simulation. To see the
+effect of using Rayon, press `s` to run sequentially and `p` to run in
+parallel.
+
+```text
+> cd rayon-demo
+> cargo run --release -- nbody visualize
+```
+
+For more information on demos, try:
+
+```text
+> cd rayon-demo
+> cargo run --release -- --help
+```
+
+## Other questions?
+
+See [the Rayon FAQ][faq].
+
+[faq]: https://github.com/rayon-rs/rayon/blob/master/FAQ.md
+
+## License
+
+Rayon is distributed under the terms of both the MIT license and the
+Apache License (Version 2.0). See [LICENSE-APACHE](LICENSE-APACHE) and
+[LICENSE-MIT](LICENSE-MIT) for details. Opening a pull request is
+assumed to signal agreement with these licensing terms.
diff --git a/rayon/RELEASES.md b/rayon/RELEASES.md
new file mode 100644
index 0000000..28b476d
--- /dev/null
+++ b/rayon/RELEASES.md
@@ -0,0 +1,862 @@
+# Release rayon 1.7.0 / rayon-core 1.11.0 (2023-03-03)
+
+- The minimum supported `rustc` is now 1.59.
+- Added a fallback when threading is unsupported.
+- The new `ParallelIterator::take_any` and `skip_any` methods work like
+  unordered `IndexedParallelIterator::take` and `skip`, counting items in
+  whatever order they are visited in parallel.
+- The new `ParallelIterator::take_any_while` and `skip_any_while` methods work
+  like unordered `Iterator::take_while` and `skip_while`, which previously had
+  no parallel equivalent. The "while" condition may be satisfied from anywhere
+  in the parallel iterator, affecting all future items regardless of position.
+- The new `yield_now` and `yield_local` functions will cooperatively yield
+  execution to Rayon, either trying to execute pending work from the entire
+  pool or from just the local deques of the current thread, respectively.
+
+# Release rayon-core 1.10.2 (2023-01-22)
+
+- Fixed miri-reported UB for SharedReadOnly tags protected by a call.
+
+# Release rayon 1.6.1 (2022-12-09)
+
+- Simplified `par_bridge` to only pull one item at a time from the iterator,
+  without batching. Threads that are waiting for iterator items will now block
+  appropriately rather than spinning CPU. (Thanks @njaard!)
+- Added protection against recursion in `par_bridge`, so iterators that also
+  invoke rayon will not cause mutex recursion deadlocks.
+
+# Release rayon-core 1.10.1 (2022-11-18)
+
+- Fixed a race condition with threads going to sleep while a broadcast starts.
+
+# Release rayon 1.6.0 / rayon-core 1.10.0 (2022-11-18)
+
+- The minimum supported `rustc` is now 1.56.
+- The new `IndexedParallelIterator::fold_chunks` and `fold_chunks_with` methods
+  work like `ParallelIterator::fold` and `fold_with` with fixed-size chunks of
+  items. This may be useful for predictable batching performance, without the
+  allocation overhead of `IndexedParallelIterator::chunks`.
+- New "broadcast" methods run a given function on all threads in the pool.
+  These run at a sort of reduced priority after each thread has exhausted its
+  local work queue, but before they attempt work-stealing from other threads.
+  - The global `broadcast` function and `ThreadPool::broadcast` method will
+    block until completion, returning a `Vec` of all return values.
+  - The global `spawn_broadcast` function and methods on `ThreadPool`, `Scope`,
+    and `ScopeFifo` will run detached, without blocking the current thread.
+- Panicking methods now use `#[track_caller]` to report the caller's location.
+- Fixed a truncated length in `vec::Drain` when given an empty range.
+
+## Contributors
+
+Thanks to all of the contributors for this release!
+
+- @cuviper
+- @idanmuze
+- @JoeyBF
+- @JustForFun88
+- @kianmeng
+- @kornelski
+- @ritchie46
+- @ryanrussell
+- @steffahn
+- @TheIronBorn
+- @willcrozi
+
+# Release rayon 1.5.3 (2022-05-13)
+
+- The new `ParallelSliceMut::par_sort_by_cached_key` is a stable sort that caches
+  the keys for each item -- a parallel version of `slice::sort_by_cached_key`.
+
+# Release rayon-core 1.9.3 (2022-05-13)
+
+- Fixed a use-after-free race in job notification.
+
+# Release rayon 1.5.2 / rayon-core 1.9.2 (2022-04-13)
+
+- The new `ParallelSlice::par_rchunks()` and `par_rchunks_exact()` iterate
+  slice chunks in reverse, aligned against the end of the slice if the
+  length is not a perfect multiple of the chunk size. The new
+  `ParallelSliceMut::par_rchunks_mut()` and `par_rchunks_exact_mut()` are the
+  same for mutable slices.
+- The `ParallelIterator::try_*` methods now support `std::ops::ControlFlow` and
+  `std::task::Poll` items, mirroring the unstable `Try` implementations in the
+  standard library.
+- The `ParallelString` pattern-based methods now support `&[char]` patterns,
+  which match when any character in that slice is found in the string.
+- A soft limit is now enforced on the number of threads allowed in a single
+  thread pool, respecting internal bit limits that already existed. The current
+  maximum is publicly available from the new function `max_num_threads()`.
+- Fixed several Stacked Borrow and provenance issues found by `cargo miri`.
+
+## Contributors
+
+Thanks to all of the contributors for this release!
+
+- @atouchet
+- @bluss
+- @cuviper
+- @fzyzcjy
+- @nyanzebra
+- @paolobarbolini
+- @RReverser
+- @saethlin
+
+# Release rayon 1.5.1 / rayon-core 1.9.1 (2021-05-18)
+
+- The new `in_place_scope` and `in_place_scope_fifo` are variations of `scope`
+  and `scope_fifo`, running the initial non-`Send` callback directly on the
+  current thread, rather than moving execution to the thread pool.
+- With Rust 1.51 or later, arrays now implement `IntoParallelIterator`.
+- New implementations of `FromParallelIterator` make it possible to `collect`
+  complicated nestings of items.
+  - `FromParallelIterator<(A, B)> for (FromA, FromB)` works like `unzip`.
+  - `FromParallelIterator<Either<L, R>> for (A, B)` works like `partition_map`.
+- Type inference now works better with parallel `Range` and `RangeInclusive`.
+- The implementation of `FromParallelIterator` and `ParallelExtend` for
+  `Vec<T>` now uses `MaybeUninit<T>` internally to avoid creating any
+  references to uninitialized data.
+- `ParallelBridge` fixed a bug with threads missing available work.
+
+## Contributors
+
+Thanks to all of the contributors for this release!
+
+- @atouchet
+- @cuviper
+- @Hywan
+- @iRaiko
+- @Qwaz
+- @rocallahan
+
+# Release rayon 1.5.0 / rayon-core 1.9.0 (2020-10-21)
+
+- Update crossbeam dependencies.
+- The minimum supported `rustc` is now 1.36.
+
+## Contributors
+
+Thanks to all of the contributors for this release!
+
+- @cuviper
+- @mbrubeck
+- @mrksu
+
+# Release rayon 1.4.1 (2020-09-29)
+
+- The new `flat_map_iter` and `flatten_iter` methods can be used to flatten
+  sequential iterators, which may perform better in cases that don't need the
+  nested parallelism of `flat_map` and `flatten`.
+- The new `par_drain` method is a parallel version of the standard `drain` for
+  collections, removing items while keeping the original capacity. Collections
+  that implement this through `ParallelDrainRange` support draining items from
+  arbitrary index ranges, while `ParallelDrainFull` always drains everything.
+- The new `positions` method finds all items that match the given predicate and
+  returns their indices in a new iterator.
+
+# Release rayon-core 1.8.1 (2020-09-17)
+
+- Fixed an overflow panic on high-contention workloads, for a counter that was
+  meant to simply wrap. This panic only occurred with debug assertions enabled,
+  and was much more likely on 32-bit targets.
+
+# Release rayon 1.4.0 / rayon-core 1.8.0 (2020-08-24)
+
+- Implemented a new thread scheduler, [RFC 5], which uses targeted wakeups for
+  new work and for notifications of completed stolen work, reducing wasteful
+  CPU usage in idle threads.
+- Implemented `IntoParallelIterator for Range<char>` and `RangeInclusive<char>`
+  with the same iteration semantics as Rust 1.45.
+- Relaxed the lifetime requirements of the initial `scope` closure.
+
+[RFC 5]: https://github.com/rayon-rs/rfcs/pull/5
+
+## Contributors
+
+Thanks to all of the contributors for this release!
+
+- @CAD97
+- @cuviper
+- @kmaork
+- @nikomatsakis
+- @SuperFluffy
+
+
+# Release rayon 1.3.1 / rayon-core 1.7.1 (2020-06-15)
+
+- Fixed a use-after-free race in calls blocked between two rayon thread pools.
+- Collecting to an indexed `Vec` now drops any partial writes while unwinding,
+  rather than just leaking them. If dropping also panics, Rust will abort.
+  - Note: the old leaking behavior is considered _safe_, just not ideal.
+- The new `IndexedParallelIterator::step_by()` adapts an iterator to step
+  through items by the given count, like `Iterator::step_by()`.
+- The new `ParallelSlice::par_chunks_exact()` and mutable equivalent
+  `ParallelSliceMut::par_chunks_exact_mut()` ensure that the chunks always have
+  the exact length requested, leaving any remainder separate, like the slice
+  methods `chunks_exact()` and `chunks_exact_mut()`.
+
+## Contributors
+
+Thanks to all of the contributors for this release!
+
+- @adrian5
+- @bluss
+- @cuviper
+- @FlyingCanoe
+- @GuillaumeGomez
+- @matthiasbeyer
+- @picoHz
+- @zesterer
+
+
+# Release rayon 1.3.0 / rayon-core 1.7.0 (2019-12-21)
+
+- Tuples up to length 12 now implement `IntoParallelIterator`, creating a
+  `MultiZip` iterator that produces items as similarly-shaped tuples.
+- The `--cfg=rayon_unstable` supporting code for `rayon-futures` is removed.
+- The minimum supported `rustc` is now 1.31.
+
+## Contributors
+
+Thanks to all of the contributors for this release!
+
+- @cuviper
+- @c410-f3r
+- @silwol
+
+
+# Release rayon-futures 0.1.1 (2019-12-21)
+
+- `Send` bounds have been added for the `Item` and `Error` associated types on
+  all generic `F: Future` interfaces. While technically a breaking change, this
+  is a soundness fix, so we are not increasing the semantic version for this.
+- This crate is now deprecated, and the `--cfg=rayon_unstable` supporting code
+  will be removed in `rayon-core 1.7.0`. This only supported the now-obsolete
+  `Future` from `futures 0.1`, while support for `std::future::Future` is
+  expected to come directly in `rayon-core` -- although that is not ready yet.
+
+## Contributors
+
+Thanks to all of the contributors for this release!
+
+- @cuviper
+- @kornelski
+- @jClaireCodesStuff
+- @jwass
+- @seanchen1991
+
+
+# Release rayon 1.2.1 / rayon-core 1.6.1 (2019-11-20)
+
+- Update crossbeam dependencies.
+- Add top-level doc links for the iterator traits.
+- Document that the iterator traits are not object safe.
+
+## Contributors
+
+Thanks to all of the contributors for this release!
+
+- @cuviper
+- @dnaka91
+- @matklad
+- @nikomatsakis
+- @Qqwy
+- @vorner
+
+
+# Release rayon 1.2.0 / rayon-core 1.6.0 (2019-08-30)
+
+- The new `ParallelIterator::copied()` converts an iterator of references into
+  copied values, like `Iterator::copied()`.
+- `ParallelExtend` is now implemented for the unit `()`.
+- Internal updates were made to improve test determinism, reduce closure type
+  sizes, reduce task allocations, and update dependencies.
+- The minimum supported `rustc` is now 1.28.
+
+## Contributors
+
+Thanks to all of the contributors for this release!
+
+- @Aaron1011
+- @cuviper
+- @ralfbiedert
+
+
+# Release rayon 1.1.0 / rayon-core 1.5.0 (2019-06-12)
+
+- FIFO spawns are now supported using the new `spawn_fifo()` and `scope_fifo()`
+  global functions, and their corresponding `ThreadPool` methods.
+  - Normally when tasks are queued on a thread, the most recent is processed
+    first (LIFO) while other threads will steal the oldest (FIFO). With FIFO
+    spawns, those tasks are processed locally in FIFO order too.
+  - Regular spawns and other tasks like `join` are not affected.
+  - The `breadth_first` configuration flag, which globally approximated this
+    effect, is now deprecated.
+  - For more design details, please see [RFC 1].
+- `ThreadPoolBuilder` can now take a custom `spawn_handler` to control how
+  threads will be created in the pool.
+  - `ThreadPoolBuilder::build_scoped()` uses this to create a scoped thread
+    pool, where the threads are able to use non-static data.
+  - This may also be used to support threading in exotic environments, like
+    WebAssembly, which don't support the normal `std::thread`.
+- `ParallelIterator` has 3 new methods: `find_map_any()`, `find_map_first()`,
+  and `find_map_last()`, like `Iterator::find_map()` with ordering constraints.
+- The new `ParallelIterator::panic_fuse()` makes a parallel iterator halt as soon
+  as possible if any of its threads panic. Otherwise, the panic state is not
+  usually noticed until the iterator joins its parallel tasks back together.
+- `IntoParallelIterator` is now implemented for integral `RangeInclusive`.
+- Several internal `Folder`s now have optimized `consume_iter` implementations.
+- `rayon_core::current_thread_index()` is now re-exported in `rayon`.
+- The minimum `rustc` is now 1.26, following the update policy defined in [RFC 3].
+
+## Contributors
+
+Thanks to all of the contributors for this release!
+
+- @cuviper
+- @didroe
+- @GuillaumeGomez
+- @huonw
+- @janriemer
+- @kornelski
+- @nikomatsakis
+- @seanchen1991
+- @yegeun542
+
+[RFC 1]: https://github.com/rayon-rs/rfcs/blob/master/accepted/rfc0001-scope-scheduling.md
+[RFC 3]: https://github.com/rayon-rs/rfcs/blob/master/accepted/rfc0003-minimum-rustc.md
+
+
+# Release rayon 1.0.3 (2018-11-02)
+
+- `ParallelExtend` is now implemented for tuple pairs, enabling nested
+  `unzip()` and `partition_map()` operations.  For instance, `(A, (B, C))`
+  items can be unzipped into `(Vec<A>, (Vec<B>, Vec<C>))`.
+  - `ParallelExtend<(A, B)>` works like `unzip()`.
+  - `ParallelExtend<Either<A, B>>` works like `partition_map()`.
+- `ParallelIterator` now has a method `map_init()` which calls an `init`
+  function for a value to pair with items, like `map_with()` but dynamically
+  constructed.  That value type has no constraints, not even `Send` or `Sync`.
+  - The new `for_each_init()` is a variant of this for simple iteration.
+  - The new `try_for_each_init()` is a variant for fallible iteration.
+
+## Contributors
+
+Thanks to all of the contributors for this release!
+
+- @cuviper
+- @dan-zheng
+- @dholbert
+- @ignatenkobrain
+- @mdonoughe
+
+
+# Release rayon 1.0.2 / rayon-core 1.4.1 (2018-07-17)
+
+- The `ParallelBridge` trait with method `par_bridge()` makes it possible to
+  use any `Send`able `Iterator` in parallel!
+  - This trait has been added to `rayon::prelude`.
+  - It automatically implements internal synchronization and queueing to
+    spread the `Item`s across the thread pool.  Iteration order is not
+    preserved by this adaptor.
+  - "Native" Rayon iterators like `par_iter()` should still be preferred when
+    possible for better efficiency.
+- `ParallelString` now has additional methods for parity with `std` string
+  iterators: `par_char_indices()`, `par_bytes()`, `par_encode_utf16()`,
+  `par_matches()`, and `par_match_indices()`.
+- `ParallelIterator` now has fallible methods `try_fold()`, `try_reduce()`,
+  and `try_for_each`, plus `*_with()` variants of each, for automatically
+  short-circuiting iterators on `None` or `Err` values.  These are inspired by
+  `Iterator::try_fold()` and `try_for_each()` that were stabilized in Rust 1.27.
+- `Range<i128>` and `Range<u128>` are now supported with Rust 1.26 and later.
+- Small improvements have been made to the documentation.
+- `rayon-core` now only depends on `rand` for testing.
+- Rayon tests now work on stable Rust.
+
+## Contributors
+
+Thanks to all of the contributors for this release!
+
+- @AndyGauge
+- @cuviper
+- @ignatenkobrain
+- @LukasKalbertodt
+- @MajorBreakfast
+- @nikomatsakis
+- @paulkernfeld
+- @QuietMisdreavus
+
+
+# Release rayon 1.0.1 (2018-03-16)
+
+- Added more documentation for `rayon::iter::split()`.
+- Corrected links and typos in documentation.
+
+## Contributors
+
+Thanks to all of the contributors for this release!
+
+- @cuviper
+- @HadrienG2
+- @matthiasbeyer
+- @nikomatsakis
+
+
+# Release rayon 1.0.0 / rayon-core 1.4.0 (2018-02-15)
+
+- `ParallelIterator` added the `update` method which applies a function to
+  mutable references, inspired by `itertools`.
+- `IndexedParallelIterator` added the `chunks` method which yields vectors of
+  consecutive items from the base iterator, inspired by `itertools`.
+- `String` now implements `FromParallelIterator<Cow<str>>` and
+  `ParallelExtend<Cow<str>>`, inspired by `std`.
+- `()` now implements `FromParallelIterator<()>`, inspired by `std`.
+- The new `ThreadPoolBuilder` replaces and deprecates `Configuration`.
+  - Errors from initialization now have the concrete `ThreadPoolBuildError`
+    type, rather than `Box<Error>`, and this type implements `Send` and `Sync`.
+  - `ThreadPool::new` is deprecated in favor of `ThreadPoolBuilder::build`.
+  - `initialize` is deprecated in favor of `ThreadPoolBuilder::build_global`.
+- Examples have been added to most of the parallel iterator methods.
+- A lot of the documentation has been reorganized and extended.
+
+## Breaking changes
+
+- Rayon now requires rustc 1.13 or greater.
+- `IndexedParallelIterator::len` and `ParallelIterator::opt_len` now operate on
+  `&self` instead of `&mut self`.
+- `IndexedParallelIterator::collect_into` is now `collect_into_vec`.
+- `IndexedParallelIterator::unzip_into` is now `unzip_into_vecs`.
+- Rayon no longer exports the deprecated `Configuration` and `initialize` from
+  rayon-core.
+
+## Contributors
+
+Thanks to all of the contributors for this release!
+
+- @Bilkow
+- @cuviper
+- @Enet4
+- @ignatenkobrain
+- @iwillspeak
+- @jeehoonkang
+- @jwass
+- @Kerollmops
+- @KodrAus
+- @kornelski
+- @MaloJaffre
+- @nikomatsakis
+- @obv-mikhail
+- @oddg
+- @phimuemue
+- @stjepang
+- @tmccombs
+- bors[bot]
+
+
+# Release rayon 0.9.0 / rayon-core 1.3.0 / rayon-futures 0.1.0 (2017-11-09)
+
+- `Configuration` now has a `build` method.
+- `ParallelIterator` added `flatten` and `intersperse`, both inspired by
+  itertools.
+- `IndexedParallelIterator` added `interleave`, `interleave_shortest`, and
+  `zip_eq`, all inspired by itertools.
+- The new functions `iter::empty` and `once` create parallel iterators of
+  exactly zero or one item, like their `std` counterparts.
+- The new functions `iter::repeat` and `repeatn` create parallel iterators
+  repeating an item indefinitely or `n` times, respectively.
+- The new function `join_context` works like `join`, with an added `FnContext`
+  parameter that indicates whether the job was stolen.
+- `Either` (used by `ParallelIterator::partition_map`) is now re-exported from
+  the `either` crate, instead of defining our own type.
+  - `Either` also now implements `ParallelIterator`, `IndexedParallelIterator`,
+    and `ParallelExtend` when both of its `Left` and `Right` types do.
+- All public types now implement `Debug`.
+- Many of the parallel iterators now implement `Clone` where possible.
+- Much of the documentation has been extended. (but still could use more help!)
+- All rayon crates have improved metadata.
+- Rayon was evaluated in the Libz Blitz, leading to many of these improvements.
+- Rayon pull requests are now guarded by bors-ng.
+
+## Futures
+
+The `spawn_future()` method has been refactored into its own `rayon-futures`
+crate, now through a `ScopeFutureExt` trait for `ThreadPool` and `Scope`.  The
+supporting `rayon-core` APIs are still gated by `--cfg rayon_unstable`.
+
+## Breaking changes
+
+- Two breaking changes have been made to `rayon-core`, but since they're fixing
+  soundness bugs, we are considering these _minor_ changes for semver.
+  - `Scope::spawn` now requires `Send` for the closure.
+  - `ThreadPool::install` now requires `Send` for the return value.
+- The `iter::internal` module has been renamed to `iter::plumbing`, to hopefully
+  indicate that while these are low-level details, they're not really internal
+  or private to rayon.  The contents of that module are needed for third-parties
+  to implement new parallel iterators, and we'll treat them with normal semver
+  stability guarantees.
+- The function `rayon::iter::split` is no longer re-exported as `rayon::split`.
+
+## Contributors
+
+Thanks to all of the contributors for this release!
+
+- @AndyGauge
+- @ChristopherDavenport
+- @chrisvittal
+- @cuviper
+- @dns2utf8
+- @dtolnay
+- @frewsxcv
+- @gsquire
+- @Hittherhod
+- @jdr023
+- @laumann
+- @leodasvacas
+- @lvillani
+- @MajorBreakfast
+- @mamuleanu
+- @marmistrz
+- @mbrubeck
+- @mgattozzi
+- @nikomatsakis
+- @smt923
+- @stjepang
+- @tmccombs
+- @vishalsodani
+- bors[bot]
+
+
+# Release rayon 0.8.2 (2017-06-28)
+
+- `ParallelSliceMut` now has six parallel sorting methods with the same
+  variations as the standard library.
+  - `par_sort`, `par_sort_by`, and `par_sort_by_key` perform stable sorts in
+    parallel, using the default order, a custom comparator, or a key extraction
+    function, respectively.
+  - `par_sort_unstable`, `par_sort_unstable_by`, and `par_sort_unstable_by_key`
+    perform unstable sorts with the same comparison options.
+  - Thanks to @stjepang!
+
+
+# Release rayon 0.8.1 / rayon-core 1.2.0 (2017-06-14)
+
+- The following core APIs are being stabilized:
+  - `rayon::spawn()` -- spawns a task into the Rayon threadpool; as it
+    is contained in the global scope (rather than a user-created
+    scope), the task cannot capture anything from the current stack
+    frame.
+  - `ThreadPool::join()`, `ThreadPool::spawn()`, `ThreadPool::scope()`
+    -- convenience APIs for launching new work within a thread-pool.
+- The various iterator adapters are now tagged with `#[must_use]`
+- Parallel iterators now offer a `for_each_with` adapter, similar to
+  `map_with`.
+- We are adopting a new approach to handling the remaining unstable
+  APIs (which primarily pertain to futures integration). As always,
+  unstable APIs are intended for experimentation, but do not come with
+  any promise of compatibility (in other words, we might change them
+  in arbitrary ways in any release). Previously, we designated such
+  APIs using a Cargo feature "unstable". Now, we are using a regular
+  `#[cfg]` flag. This means that to see the unstable APIs, you must do
+  `RUSTFLAGS='--cfg rayon_unstable' cargo build`. This is
+  intentionally inconvenient; in particular, if you are a library,
+  then your clients must also modify their environment, signaling
+  their agreement to instability.
+
+
+# Release rayon 0.8.0 / rayon-core 1.1.0 (2017-06-13)
+
+## Rayon 0.8.0
+
+- Added the `map_with` and `fold_with` combinators, which help for
+  passing along state (like channels) that cannot be shared between
+  threads but which can be cloned on each thread split.
+- Added the `while_some` combinator, which helps for writing short-circuiting iterators.
+- Added support for "short-circuiting" collection: e.g., collecting
+  from an iterator producing `Option<T>` or `Result<T, E>` into a
+  `Option<Collection<T>>` or `Result<Collection<T>, E>`.
+- Support `FromParallelIterator` for `Cow`.
+- Removed the deprecated weight APIs.
+- Simplified the parallel iterator trait hierarchy by removing the
+  `BoundedParallelIterator` and `ExactParallelIterator` traits,
+  which were not serving much purpose.
+- Improved documentation.
+- Added some missing `Send` impls.
+- Fixed some small bugs.
+
+## Rayon-core 1.1.0
+
+- We now have more documentation.
+- Renamed the (unstable) methods `spawn_async` and
+  `spawn_future_async` -- which spawn tasks that cannot hold
+  references -- to simply `spawn` and `spawn_future`, respectively.
+- We are now using the coco library for our deque.
+- Individual threadpools can now be configured in "breadth-first"
+  mode, which causes them to execute spawned tasks in the reverse
+  order that they used to.  In some specific scenarios, this can be a
+  win (though it is not generally the right choice).
+- Added top-level functions:
+  - `current_thread_index`, for querying the index of the current worker thread within
+    its thread-pool (previously available as `thread_pool.current_thread_index()`);
+  - `current_thread_has_pending_tasks`, for querying whether the
+    current worker has an empty task deque or not. This can be
+    useful when deciding whether to spawn a task.
+- The environment variables for controlling Rayon are now
+  `RAYON_NUM_THREADS` and `RAYON_LOG`. The older variables (e.g.,
+  `RAYON_RS_NUM_CPUS`) are still supported but deprecated.
+
+## Rayon-demo
+
+- Added a new game-of-life benchmark.
+
+## Contributors
+
+Thanks to the following contributors:
+
+- @ChristopherDavenport
+- @SuperFluffy
+- @antoinewdg
+- @crazymykl
+- @cuviper
+- @glandium
+- @julian-seward1
+- @leodasvacas
+- @leshow
+- @lilianmoraru
+- @mschmo
+- @nikomatsakis
+- @stjepang
+
+
+# Release rayon 0.7.1 / rayon-core 1.0.2 (2017-05-30)
+
+This release is a targeted performance fix for #343, an issue where
+rayon threads could sometimes enter into a spin loop where they would
+be unable to make progress until they are pre-empted.
+
+
+# Release rayon 0.7 / rayon-core 1.0 (2017-04-06)
+
+This release marks the first step towards Rayon 1.0. **For best
+performance, it is important that all Rayon users update to at least
+Rayon 0.7.** This is because, as of Rayon 0.7, we have taken steps to
+ensure that, no matter how many versions of rayon are actively in use,
+there will only be a single global scheduler. This is achieved via the
+`rayon-core` crate, which is being released at version 1.0, and which
+encapsulates the core schedule APIs like `join()`. (Note: the
+`rayon-core` crate is, to some degree, an implementation detail, and
+not intended to be imported directly; its entire API surface is
+mirrored through the rayon crate.)
+
+We have also done a lot of work reorganizing the API for Rayon 0.7 in
+preparation for 1.0. The names of iterator types have been changed and
+reorganized (but few users are expected to be naming those types
+explicitly anyhow). In addition, a number of parallel iterator methods
+have been adjusted to match those in the standard iterator traits more
+closely. See the "Breaking Changes" section below for
+details.
+
+Finally, Rayon 0.7 includes a number of new features and new parallel
+iterator methods. **As of this release, Rayon's parallel iterators
+have officially reached parity with sequential iterators** -- that is,
+every sequential iterator method that makes any sense in parallel is
+supported in some capacity.
+
+### New features and methods
+
+- The internal `Producer` trait now features `fold_with`, which enables
+  better performance for some parallel iterators.
+- Strings now support `par_split()` and `par_split_whitespace()`.
+- The `Configuration` API is expanded and simplified:
+    - `num_threads(0)` no longer triggers an error
+    - you can now supply a closure to name the Rayon threads that get created
+      by using `Configuration::thread_name`.
+    - you can now inject code when Rayon threads start up and finish
+    - you can now set a custom panic handler to handle panics in various odd situations
+- Threadpools are now able to more gracefully put threads to sleep when not needed.
+- Parallel iterators now support `find_first()`, `find_last()`, `position_first()`,
+  and `position_last()`.
+- Parallel iterators now support `rev()`, which primarily affects subsequent calls
+  to `enumerate()`.
+- The `scope()` API is now considered stable (and part of `rayon-core`).
+- There is now a useful `rayon::split` function for creating custom
+  Rayon parallel iterators.
+- Parallel iterators now allow you to customize the min/max number of
+  items to be processed in a given thread. This mechanism replaces the
+  older `weight` mechanism, which is deprecated.
+- `sum()` and friends now use the standard `Sum` traits
+
+### Breaking changes
+
+In the move towards 1.0, there have been a number of minor breaking changes:
+
+- Configuration setters like `Configuration::set_num_threads()` lost the `set_` prefix,
+  and hence become something like `Configuration::num_threads()`.
+- `Configuration` getters are removed
+- Iterator types have been shuffled around and exposed more consistently:
+    - combinator types live in `rayon::iter`, e.g. `rayon::iter::Filter`
+    - iterators over various types live in a module named after their type,
+      e.g. `rayon::slice::Windows`
+- When doing a `sum()` or `product()`, type annotations are needed for the result
+  since it is now possible to have the resulting sum be of a type other than the value
+  you are iterating over (this mirrors sequential iterators).
+
+### Experimental features
+
+Experimental features require the use of the `unstable` feature. Their
+APIs may change or disappear entirely in future releases (even minor
+releases) and hence they should be avoided for production code.
+
+- We now have (unstable) support for futures integration. You can use
+  `Scope::spawn_future` or `rayon::spawn_future_async()`.
+- There is now a `rayon::spawn_async()` function for using the Rayon
+  threadpool to run tasks that do not have references to the stack.
+
+### Contributors
+
+Thanks to the following people for their contributions to this release:
+
+- @Aaronepower
+- @ChristopherDavenport
+- @bluss
+- @cuviper
+- @froydnj
+- @gaurikholkar
+- @hniksic
+- @leodasvacas
+- @leshow
+- @martinhath
+- @mbrubeck
+- @nikomatsakis
+- @pegomes
+- @schuster
+- @torkleyy
+
+
+# Release 0.6 (2016-12-21)
+
+This release includes a lot of progress towards the goal of parity
+with the sequential iterator API, though there are still a few methods
+that are not yet complete. If you'd like to help with that effort,
+[check out the milestone](https://github.com/rayon-rs/rayon/issues?q=is%3Aopen+is%3Aissue+milestone%3A%22Parity+with+the+%60Iterator%60+trait%22)
+to see the remaining issues.
+
+**Announcement:** @cuviper has been added as a collaborator to the
+Rayon repository for all of his outstanding work on Rayon, which
+includes both internal refactoring and helping to shape the public
+API. Thanks @cuviper! Keep it up.
+
+- We now support `collect()` and not just `collect_with()`.
+  You can use `collect()` to build a number of collections,
+  including vectors, maps, and sets. Moreover, when building a vector
+  with `collect()`, you are no longer limited to exact parallel iterators.
+  Thanks @nikomatsakis, @cuviper!
+- We now support `skip()` and `take()` on parallel iterators.
+  Thanks @martinhath!
+- **Breaking change:** We now match the sequential APIs for `min()` and `max()`.
+  We also support `min_by_key()` and `max_by_key()`. Thanks @tapeinosyne!
+- **Breaking change:** The `mul()` method is now renamed to `product()`,
+  to match sequential iterators. Thanks @jonathandturner!
+- We now support parallel iterator over ranges on `u64` values. Thanks @cuviper!
+- We now offer a `par_chars()` method on strings for iterating over characters
+  in parallel. Thanks @cuviper!
+- We now have new demos: a traveling salesman problem solver as well as matrix
+  multiplication. Thanks @nikomatsakis, @edre!
+- We are now documenting our minimum rustc requirement (currently
+  v1.12.0).  We will attempt to maintain compatibility with rustc
+  stable v1.12.0 as long as it remains convenient, but if new features
+  are stabilized or added that would be helpful to Rayon, or there are
+  bug fixes that we need, we will bump to the most recent rustc. Thanks @cuviper!
+- The `reduce()` functionality now has better inlining.
+  Thanks @bluss!
+- The `join()` function now has some documentation. Thanks @gsquire!
+- The project source has now been fully run through rustfmt.
+  Thanks @ChristopherDavenport!
+- Exposed helper methods for accessing the current thread index.
+  Thanks @bholley!
+
+
+# Release 0.5 (2016-11-04)
+
+- **Breaking change:** The `reduce` method has been vastly
+  simplified, and `reduce_with_identity` has been deprecated.
+- **Breaking change:** The `fold` method has been changed. It used to
+  always reduce the values, but now instead it is a combinator that
+  returns a parallel iterator which can itself be reduced. See the
+  docs for more information.
+- The following parallel iterator combinators are now available (thanks @cuviper!):
+  - `find_any()`: similar to `find` on a sequential iterator,
+    but doesn't necessarily return the *first* matching item
+  - `position_any()`: similar to `position` on a sequential iterator,
+    but doesn't necessarily return the index of *first* matching item
+  - `any()`, `all()`: just like their sequential counterparts
+- The `count()` combinator is now available for parallel iterators.
+- We now build with older versions of rustc again (thanks @durango!),
+  as we removed a stray semicolon from `thread_local!`.
+- Various improvements to the (unstable) `scope()` API implementation.
+
+
+# Release 0.4.3 (2016-10-25)
+
+- Parallel iterators now offer an adaptive weight scheme,
+  which means that explicit weights should no longer
+  be necessary in most cases! Thanks @cuviper!
+  - We are considering removing weights or changing the weight mechanism
+    before 1.0. Examples of scenarios where you still need weights even
+    with this adaptive mechanism would be great. Join the discussion
+    at <https://github.com/rayon-rs/rayon/issues/111>.
+- New (unstable) scoped threads API, see `rayon::scope` for details.
+  - You will need to supply the [cargo feature] `unstable`.
+- The various demos and benchmarks have been consolidated into one
+  program, `rayon-demo`.
+- Optimizations in Rayon's inner workings. Thanks @emilio!
+- Update `num_cpus` to 1.0. Thanks @jamwt!
+- Various internal cleanup in the implementation and typo fixes.
+  Thanks @cuviper, @Eh2406, and @spacejam!
+
+[cargo feature]: https://doc.rust-lang.org/cargo/reference/features.html#the-features-section
+
+
+# Release 0.4.2 (2016-09-15)
+
+- Updated crates.io metadata.
+
+
+# Release 0.4.1 (2016-09-14)
+
+- New `chain` combinator for parallel iterators.
+- `Option`, `Result`, as well as many more collection types now have
+  parallel iterators.
+- New mergesort demo.
+- Misc fixes.
+
+Thanks to @cuviper, @edre, @jdanford, @frewsxcv for their contributions!
+
+
+# Release 0.4 (2016-05-16)
+
+- Make use of latest versions of catch-panic and various fixes to panic propagation.
+- Add new prime sieve demo.
+- Add `cloned()` and `inspect()` combinators.
+- Misc fixes for Rust RFC 1214.
+
+Thanks to @areilb1, @Amanieu, @SharplEr, and @cuviper for their contributions!
+
+
+# Release 0.3 (2016-02-23)
+
+- Expanded `par_iter` APIs now available:
+  - `into_par_iter` is now supported on vectors (taking ownership of the elements)
+- Panic handling is much improved:
+  - if you use the Nightly feature, experimental panic recovery is available
+  - otherwise, panics propagate out and poison the workpool
+- New `Configuration` object to control number of threads and other details
+- New demos and benchmarks
+  - try `cargo run --release -- visualize` in `demo/nbody` :)
+    - Note: a nightly compiler is required for this demo due to the
+      use of the `+=` syntax
+
+Thanks to @bjz, @cuviper, @Amanieu, and @willi-kappler for their contributions!
+
+
+# Release 0.2 and earlier
+
+No release notes were being kept at this time.
diff --git a/rayon/bors.toml b/rayon/bors.toml
new file mode 100644
index 0000000..d5e29f6
--- /dev/null
+++ b/rayon/bors.toml
@@ -0,0 +1,17 @@
+status = [
+  "Check (1.59.0)",
+  "Test (ubuntu-latest, stable)",
+  "Test (ubuntu-latest, stable-i686)",
+  "Test (ubuntu-latest, beta)",
+  "Test (ubuntu-latest, nightly)",
+  "Test (windows-latest, stable)",
+  "Test (windows-latest, beta)",
+  "Test (windows-latest, nightly)",
+  "Test (macos-latest, stable)",
+  "Test (macos-latest, beta)",
+  "Test (macos-latest, nightly)",
+  "Demo (stable)",
+  "Demo (nightly)",
+  "WebAssembly",
+  "Format",
+]
diff --git a/rayon/ci/alt-core/Cargo.toml b/rayon/ci/alt-core/Cargo.toml
new file mode 100644
index 0000000..3a6cedc
--- /dev/null
+++ b/rayon/ci/alt-core/Cargo.toml
@@ -0,0 +1,10 @@
+[package]
+edition = "2018"
+name = "alt-core"
+version = "0.0.0"
+authors = ["Josh Stone <cu...@gmail.com>"]
+links = "rayon-core"
+build = "build.rs"
+publish = false
+
+[dependencies]
diff --git a/rayon/ci/alt-core/build.rs b/rayon/ci/alt-core/build.rs
new file mode 100644
index 0000000..f328e4d
--- /dev/null
+++ b/rayon/ci/alt-core/build.rs
@@ -0,0 +1 @@
+fn main() {}
diff --git a/rayon/ci/alt-core/src/lib.rs b/rayon/ci/alt-core/src/lib.rs
new file mode 100644
index 0000000..e69de29
diff --git a/rayon/ci/compat-Cargo.lock b/rayon/ci/compat-Cargo.lock
new file mode 100644
index 0000000..095c8d9
--- /dev/null
+++ b/rayon/ci/compat-Cargo.lock
@@ -0,0 +1,1781 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "addr2line"
+version = "0.19.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a76fd60b23679b7d19bd066031410fb7e458ccc5e958eb5c325888ce4baedc97"
+dependencies = [
+ "gimli",
+]
+
+[[package]]
+name = "adler"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
+
+[[package]]
+name = "aho-corasick"
+version = "0.7.20"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "approx"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3f2a05fd1bd10b2527e20a2cd32d8873d115b8b39fe219ee25f42a8aca6ba278"
+dependencies = [
+ "num-traits",
+]
+
+[[package]]
+name = "arrayref"
+version = "0.3.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544"
+
+[[package]]
+name = "arrayvec"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b"
+
+[[package]]
+name = "autocfg"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
+
+[[package]]
+name = "backtrace"
+version = "0.3.67"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "233d376d6d185f2a3093e58f283f60f880315b6c60075b01f36b3b85154564ca"
+dependencies = [
+ "addr2line",
+ "cc",
+ "cfg-if",
+ "libc",
+ "miniz_oxide",
+ "object",
+ "rustc-demangle",
+]
+
+[[package]]
+name = "bitflags"
+version = "1.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
+
+[[package]]
+name = "block"
+version = "0.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0d8c1fef690941d3e7788d328517591fecc684c084084702d6ff1641e993699a"
+
+[[package]]
+name = "bumpalo"
+version = "3.12.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0d261e256854913907f67ed06efbc3338dfe6179796deefc1ff763fc1aee5535"
+
+[[package]]
+name = "bytemuck"
+version = "1.13.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "17febce684fd15d89027105661fec94afb475cb995fbc59d2865198446ba2eea"
+
+[[package]]
+name = "calloop"
+version = "0.10.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1a59225be45a478d772ce015d9743e49e92798ece9e34eda9a6aa2a6a7f40192"
+dependencies = [
+ "log",
+ "nix 0.25.1",
+ "slotmap",
+ "thiserror",
+ "vec_map",
+]
+
+[[package]]
+name = "cc"
+version = "1.0.79"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f"
+
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "cgl"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0ced0551234e87afee12411d535648dd89d2e7f34c78b753395567aff3d447ff"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "cgmath"
+version = "0.18.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1a98d30140e3296250832bbaaff83b27dcd6fa3cc70fb6f1f3e5c9c0023b5317"
+dependencies = [
+ "approx",
+ "num-traits",
+]
+
+[[package]]
+name = "cmake"
+version = "0.1.49"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "db34956e100b30725f2eb215f90d4871051239535632f84fea3bc92722c66b7c"
+dependencies = [
+ "cc",
+]
+
+[[package]]
+name = "cocoa"
+version = "0.24.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f425db7937052c684daec3bd6375c8abe2d146dca4b8b143d6db777c39138f3a"
+dependencies = [
+ "bitflags",
+ "block",
+ "cocoa-foundation",
+ "core-foundation",
+ "core-graphics",
+ "foreign-types 0.3.2",
+ "libc",
+ "objc",
+]
+
+[[package]]
+name = "cocoa-foundation"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7ade49b65d560ca58c403a479bb396592b155c0185eada742ee323d1d68d6318"
+dependencies = [
+ "bitflags",
+ "block",
+ "core-foundation",
+ "core-graphics-types",
+ "foreign-types 0.3.2",
+ "libc",
+ "objc",
+]
+
+[[package]]
+name = "core-foundation"
+version = "0.9.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146"
+dependencies = [
+ "core-foundation-sys",
+ "libc",
+]
+
+[[package]]
+name = "core-foundation-sys"
+version = "0.8.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc"
+
+[[package]]
+name = "core-graphics"
+version = "0.22.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2581bbab3b8ffc6fcbd550bf46c355135d16e9ff2a6ea032ad6b9bf1d7efe4fb"
+dependencies = [
+ "bitflags",
+ "core-foundation",
+ "core-graphics-types",
+ "foreign-types 0.3.2",
+ "libc",
+]
+
+[[package]]
+name = "core-graphics-types"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3a68b68b3446082644c91ac778bf50cd4104bfb002b5a6a7c44cca5a2c70788b"
+dependencies = [
+ "bitflags",
+ "core-foundation",
+ "foreign-types 0.3.2",
+ "libc",
+]
+
+[[package]]
+name = "core-text"
+version = "19.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "99d74ada66e07c1cefa18f8abfba765b486f250de2e4a999e5727fc0dd4b4a25"
+dependencies = [
+ "core-foundation",
+ "core-graphics",
+ "foreign-types 0.3.2",
+ "libc",
+]
+
+[[package]]
+name = "crc32fast"
+version = "1.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d"
+dependencies = [
+ "cfg-if",
+]
+
+[[package]]
+name = "crossbeam-channel"
+version = "0.5.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cf2b3e8478797446514c91ef04bafcb59faba183e621ad488df88983cc14128c"
+dependencies = [
+ "cfg-if",
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "crossbeam-deque"
+version = "0.8.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef"
+dependencies = [
+ "cfg-if",
+ "crossbeam-epoch",
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "crossbeam-epoch"
+version = "0.9.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "46bd5f3f85273295a9d14aedfb86f6aadbff6d8f5295c4a9edb08e819dcf5695"
+dependencies = [
+ "autocfg",
+ "cfg-if",
+ "crossbeam-utils",
+ "memoffset 0.8.0",
+ "scopeguard",
+]
+
+[[package]]
+name = "crossbeam-utils"
+version = "0.8.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b"
+dependencies = [
+ "cfg-if",
+]
+
+[[package]]
+name = "crossfont"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "21fd3add36ea31aba1520aa5288714dd63be506106753226d0eb387a93bc9c45"
+dependencies = [
+ "cocoa",
+ "core-foundation",
+ "core-foundation-sys",
+ "core-graphics",
+ "core-text",
+ "dwrote",
+ "foreign-types 0.5.0",
+ "freetype-rs",
+ "libc",
+ "log",
+ "objc",
+ "once_cell",
+ "pkg-config",
+ "servo-fontconfig",
+ "winapi",
+]
+
+[[package]]
+name = "cty"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b365fabc795046672053e29c954733ec3b05e4be654ab130fe8f1f94d7051f35"
+
+[[package]]
+name = "darling"
+version = "0.13.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a01d95850c592940db9b8194bc39f4bc0e89dee5c4265e4b1807c34a9aba453c"
+dependencies = [
+ "darling_core",
+ "darling_macro",
+]
+
+[[package]]
+name = "darling_core"
+version = "0.13.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610"
+dependencies = [
+ "fnv",
+ "ident_case",
+ "proc-macro2",
+ "quote",
+ "strsim",
+ "syn",
+]
+
+[[package]]
+name = "darling_macro"
+version = "0.13.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835"
+dependencies = [
+ "darling_core",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "dispatch"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bd0c93bb4b0c6d9b77f4435b0ae98c24d17f1c45b2ff844c6151a07256ca923b"
+
+[[package]]
+name = "dlib"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac1b7517328c04c2aa68422fc60a41b92208182142ed04a25879c26c8f878794"
+dependencies = [
+ "libloading",
+]
+
+[[package]]
+name = "doc-comment"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10"
+
+[[package]]
+name = "docopt"
+version = "1.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7f3f119846c823f9eafcf953a8f6ffb6ed69bf6240883261a7f13b634579a51f"
+dependencies = [
+ "lazy_static",
+ "regex",
+ "serde",
+ "strsim",
+]
+
+[[package]]
+name = "downcast-rs"
+version = "1.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9ea835d29036a4087793836fa931b08837ad5e957da9e23886b29586fb9b6650"
+
+[[package]]
+name = "dwrote"
+version = "0.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "439a1c2ba5611ad3ed731280541d36d2e9c4ac5e7fb818a27b604bdc5a6aa65b"
+dependencies = [
+ "lazy_static",
+ "libc",
+ "serde",
+ "serde_derive",
+ "winapi",
+ "wio",
+]
+
+[[package]]
+name = "either"
+version = "1.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91"
+
+[[package]]
+name = "expat-sys"
+version = "2.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "658f19728920138342f68408b7cf7644d90d4784353d8ebc32e7e8663dbe45fa"
+dependencies = [
+ "cmake",
+ "pkg-config",
+]
+
+[[package]]
+name = "fixedbitset"
+version = "0.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80"
+
+[[package]]
+name = "flate2"
+version = "1.0.25"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a8a2db397cb1c8772f31494cb8917e48cd1e64f0fa7efac59fbd741a0a8ce841"
+dependencies = [
+ "crc32fast",
+ "miniz_oxide",
+]
+
+[[package]]
+name = "fnv"
+version = "1.0.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
+
+[[package]]
+name = "foreign-types"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1"
+dependencies = [
+ "foreign-types-shared 0.1.1",
+]
+
+[[package]]
+name = "foreign-types"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d737d9aa519fb7b749cbc3b962edcf310a8dd1f4b67c91c4f83975dbdd17d965"
+dependencies = [
+ "foreign-types-macros",
+ "foreign-types-shared 0.3.1",
+]
+
+[[package]]
+name = "foreign-types-macros"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c8469d0d40519bc608ec6863f1cc88f3f1deee15913f2f3b3e573d81ed38cccc"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "foreign-types-shared"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
+
+[[package]]
+name = "foreign-types-shared"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "aa9a19cbb55df58761df49b23516a86d432839add4af60fc256da840f66ed35b"
+
+[[package]]
+name = "freetype-rs"
+version = "0.26.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "74eadec9d0a5c28c54bb9882e54787275152a4e36ce206b45d7451384e5bf5fb"
+dependencies = [
+ "bitflags",
+ "freetype-sys",
+ "libc",
+]
+
+[[package]]
+name = "freetype-sys"
+version = "0.13.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a37d4011c0cc628dfa766fcc195454f4b068d7afdc2adfd28861191d866e731a"
+dependencies = [
+ "cmake",
+ "libc",
+ "pkg-config",
+]
+
+[[package]]
+name = "getrandom"
+version = "0.2.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31"
+dependencies = [
+ "cfg-if",
+ "libc",
+ "wasi",
+]
+
+[[package]]
+name = "gimli"
+version = "0.27.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4"
+
+[[package]]
+name = "gl_generator"
+version = "0.14.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1a95dfc23a2b4a9a2f5ab41d194f8bfda3cabec42af4e39f08c339eb2a0c124d"
+dependencies = [
+ "khronos_api",
+ "log",
+ "xml-rs",
+]
+
+[[package]]
+name = "glium"
+version = "0.32.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d2766728ecb86014b91d3d687614b32d65aacbbdc887f424a7b03cba3ab593bf"
+dependencies = [
+ "backtrace",
+ "fnv",
+ "gl_generator",
+ "glutin",
+ "lazy_static",
+ "memoffset 0.6.5",
+ "smallvec",
+ "takeable-option",
+]
+
+[[package]]
+name = "glutin"
+version = "0.29.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "444c9ad294fdcaf20ccf6726b78f380b5450275540c9b68ab62f49726ad1c713"
+dependencies = [
+ "cgl",
+ "cocoa",
+ "core-foundation",
+ "glutin_egl_sys",
+ "glutin_gles2_sys",
+ "glutin_glx_sys",
+ "glutin_wgl_sys",
+ "libloading",
+ "log",
+ "objc",
+ "once_cell",
+ "osmesa-sys",
+ "parking_lot",
+ "raw-window-handle 0.5.0",
+ "wayland-client",
+ "wayland-egl",
+ "winapi",
+ "winit",
+]
+
+[[package]]
+name = "glutin_egl_sys"
+version = "0.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "68900f84b471f31ea1d1355567eb865a2cf446294f06cef8d653ed7bcf5f013d"
+dependencies = [
+ "gl_generator",
+ "winapi",
+]
+
+[[package]]
+name = "glutin_gles2_sys"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e8094e708b730a7c8a1954f4f8a31880af00eb8a1c5b5bf85d28a0a3c6d69103"
+dependencies = [
+ "gl_generator",
+ "objc",
+]
+
+[[package]]
+name = "glutin_glx_sys"
+version = "0.1.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d93d0575865098580c5b3a423188cd959419912ea60b1e48e8b3b526f6d02468"
+dependencies = [
+ "gl_generator",
+ "x11-dl",
+]
+
+[[package]]
+name = "glutin_wgl_sys"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3da5951a1569dbab865c6f2a863efafff193a93caf05538d193e9e3816d21696"
+dependencies = [
+ "gl_generator",
+]
+
+[[package]]
+name = "hermit-abi"
+version = "0.2.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "ident_case"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39"
+
+[[package]]
+name = "instant"
+version = "0.1.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c"
+dependencies = [
+ "cfg-if",
+ "js-sys",
+ "wasm-bindgen",
+ "web-sys",
+]
+
+[[package]]
+name = "jni-sys"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130"
+
+[[package]]
+name = "js-sys"
+version = "0.3.61"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "445dde2150c55e483f3d8416706b97ec8e8237c307e5b7b4b8dd15e6af2a0730"
+dependencies = [
+ "wasm-bindgen",
+]
+
+[[package]]
+name = "khronos_api"
+version = "3.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e2db585e1d738fc771bf08a151420d3ed193d9d895a36df7f6f8a9456b911ddc"
+
+[[package]]
+name = "lazy_static"
+version = "1.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
+
+[[package]]
+name = "libc"
+version = "0.2.139"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "201de327520df007757c1f0adce6e827fe8562fbc28bfd9c15571c66ca1f5f79"
+
+[[package]]
+name = "libloading"
+version = "0.7.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f"
+dependencies = [
+ "cfg-if",
+ "winapi",
+]
+
+[[package]]
+name = "lock_api"
+version = "0.4.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df"
+dependencies = [
+ "autocfg",
+ "scopeguard",
+]
+
+[[package]]
+name = "log"
+version = "0.4.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e"
+dependencies = [
+ "cfg-if",
+]
+
+[[package]]
+name = "malloc_buf"
+version = "0.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "62bb907fe88d54d8d9ce32a3cceab4218ed2f6b7d35617cafe9adf84e43919cb"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "memchr"
+version = "2.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
+
+[[package]]
+name = "memmap2"
+version = "0.5.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "83faa42c0a078c393f6b29d5db232d8be22776a891f8f56e5284faee4a20b327"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "memoffset"
+version = "0.6.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce"
+dependencies = [
+ "autocfg",
+]
+
+[[package]]
+name = "memoffset"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1"
+dependencies = [
+ "autocfg",
+]
+
+[[package]]
+name = "minimal-lexical"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a"
+
+[[package]]
+name = "miniz_oxide"
+version = "0.6.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa"
+dependencies = [
+ "adler",
+]
+
+[[package]]
+name = "mio"
+version = "0.8.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9"
+dependencies = [
+ "libc",
+ "log",
+ "wasi",
+ "windows-sys 0.45.0",
+]
+
+[[package]]
+name = "ndk"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "451422b7e4718271c8b5b3aadf5adedba43dc76312454b387e98fae0fc951aa0"
+dependencies = [
+ "bitflags",
+ "jni-sys",
+ "ndk-sys",
+ "num_enum",
+ "raw-window-handle 0.5.0",
+ "thiserror",
+]
+
+[[package]]
+name = "ndk-context"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "27b02d87554356db9e9a873add8782d4ea6e3e58ea071a9adb9a2e8ddb884a8b"
+
+[[package]]
+name = "ndk-glue"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0434fabdd2c15e0aab768ca31d5b7b333717f03cf02037d5a0a3ff3c278ed67f"
+dependencies = [
+ "libc",
+ "log",
+ "ndk",
+ "ndk-context",
+ "ndk-macro",
+ "ndk-sys",
+ "once_cell",
+ "parking_lot",
+]
+
+[[package]]
+name = "ndk-macro"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0df7ac00c4672f9d5aece54ee3347520b7e20f158656c7db2e6de01902eb7a6c"
+dependencies = [
+ "darling",
+ "proc-macro-crate",
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "ndk-sys"
+version = "0.4.1+23.1.7779620"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3cf2aae958bd232cac5069850591667ad422d263686d75b52a065f9badeee5a3"
+dependencies = [
+ "jni-sys",
+]
+
+[[package]]
+name = "nix"
+version = "0.24.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fa52e972a9a719cecb6864fb88568781eb706bac2cd1d4f04a648542dbf78069"
+dependencies = [
+ "bitflags",
+ "cfg-if",
+ "libc",
+ "memoffset 0.6.5",
+]
+
+[[package]]
+name = "nix"
+version = "0.25.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f346ff70e7dbfd675fe90590b92d59ef2de15a8779ae305ebcbfd3f0caf59be4"
+dependencies = [
+ "autocfg",
+ "bitflags",
+ "cfg-if",
+ "libc",
+ "memoffset 0.6.5",
+]
+
+[[package]]
+name = "nom"
+version = "7.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a"
+dependencies = [
+ "memchr",
+ "minimal-lexical",
+]
+
+[[package]]
+name = "num"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "43db66d1170d347f9a065114077f7dccb00c1b9478c89384490a3425279a4606"
+dependencies = [
+ "num-bigint",
+ "num-complex",
+ "num-integer",
+ "num-iter",
+ "num-rational",
+ "num-traits",
+]
+
+[[package]]
+name = "num-bigint"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f"
+dependencies = [
+ "autocfg",
+ "num-integer",
+ "num-traits",
+]
+
+[[package]]
+name = "num-complex"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "02e0d21255c828d6f128a1e41534206671e8c3ea0c62f32291e808dc82cff17d"
+dependencies = [
+ "num-traits",
+]
+
+[[package]]
+name = "num-integer"
+version = "0.1.45"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9"
+dependencies = [
+ "autocfg",
+ "num-traits",
+]
+
+[[package]]
+name = "num-iter"
+version = "0.1.43"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252"
+dependencies = [
+ "autocfg",
+ "num-integer",
+ "num-traits",
+]
+
+[[package]]
+name = "num-rational"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0638a1c9d0a3c0914158145bc76cff373a75a627e6ecbfb71cbe6f453a5a19b0"
+dependencies = [
+ "autocfg",
+ "num-bigint",
+ "num-integer",
+ "num-traits",
+]
+
+[[package]]
+name = "num-traits"
+version = "0.2.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd"
+dependencies = [
+ "autocfg",
+]
+
+[[package]]
+name = "num_cpus"
+version = "1.15.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b"
+dependencies = [
+ "hermit-abi",
+ "libc",
+]
+
+[[package]]
+name = "num_enum"
+version = "0.5.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1f646caf906c20226733ed5b1374287eb97e3c2a5c227ce668c1f2ce20ae57c9"
+dependencies = [
+ "num_enum_derive",
+]
+
+[[package]]
+name = "num_enum_derive"
+version = "0.5.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dcbff9bc912032c62bf65ef1d5aea88983b420f4f839db1e9b0c281a25c9c799"
+dependencies = [
+ "proc-macro-crate",
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "objc"
+version = "0.2.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "915b1b472bc21c53464d6c8461c9d3af805ba1ef837e1cac254428f4a77177b1"
+dependencies = [
+ "malloc_buf",
+]
+
+[[package]]
+name = "object"
+version = "0.30.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ea86265d3d3dcb6a27fc51bd29a4bf387fae9d2986b823079d4986af253eb439"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "once_cell"
+version = "1.17.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3"
+
+[[package]]
+name = "osmesa-sys"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "88cfece6e95d2e717e0872a7f53a8684712ad13822a7979bc760b9c77ec0013b"
+dependencies = [
+ "shared_library",
+]
+
+[[package]]
+name = "parking_lot"
+version = "0.12.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f"
+dependencies = [
+ "lock_api",
+ "parking_lot_core",
+]
+
+[[package]]
+name = "parking_lot_core"
+version = "0.9.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521"
+dependencies = [
+ "cfg-if",
+ "libc",
+ "redox_syscall",
+ "smallvec",
+ "windows-sys 0.45.0",
+]
+
+[[package]]
+name = "percent-encoding"
+version = "2.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e"
+
+[[package]]
+name = "pkg-config"
+version = "0.3.26"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160"
+
+[[package]]
+name = "png"
+version = "0.17.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5d708eaf860a19b19ce538740d2b4bdeeb8337fa53f7738455e706623ad5c638"
+dependencies = [
+ "bitflags",
+ "crc32fast",
+ "flate2",
+ "miniz_oxide",
+]
+
+[[package]]
+name = "ppv-lite86"
+version = "0.2.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de"
+
+[[package]]
+name = "proc-macro-crate"
+version = "1.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "eda0fc3b0fb7c975631757e14d9049da17374063edb6ebbcbc54d880d4fe94e9"
+dependencies = [
+ "once_cell",
+ "thiserror",
+ "toml",
+]
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.51"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5d727cae5b39d21da60fa540906919ad737832fe0b1c165da3a34d6548c849d6"
+dependencies = [
+ "unicode-ident",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.23"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "rand"
+version = "0.8.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
+dependencies = [
+ "libc",
+ "rand_chacha",
+ "rand_core",
+]
+
+[[package]]
+name = "rand_chacha"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
+dependencies = [
+ "ppv-lite86",
+ "rand_core",
+]
+
+[[package]]
+name = "rand_core"
+version = "0.6.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
+dependencies = [
+ "getrandom",
+]
+
+[[package]]
+name = "rand_xorshift"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f"
+dependencies = [
+ "rand_core",
+]
+
+[[package]]
+name = "raw-window-handle"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b800beb9b6e7d2df1fe337c9e3d04e3af22a124460fb4c30fcc22c9117cefb41"
+dependencies = [
+ "cty",
+]
+
+[[package]]
+name = "raw-window-handle"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ed7e3d950b66e19e0c372f3fa3fbbcf85b1746b571f74e0c2af6042a5c93420a"
+dependencies = [
+ "cty",
+]
+
+[[package]]
+name = "rayon"
+version = "1.7.0"
+dependencies = [
+ "either",
+ "rand",
+ "rand_xorshift",
+ "rayon-core",
+]
+
+[[package]]
+name = "rayon-core"
+version = "1.11.0"
+dependencies = [
+ "crossbeam-channel",
+ "crossbeam-deque",
+ "crossbeam-utils",
+ "libc",
+ "num_cpus",
+ "rand",
+ "rand_xorshift",
+ "scoped-tls",
+]
+
+[[package]]
+name = "rayon-demo"
+version = "0.0.0"
+dependencies = [
+ "cgmath",
+ "doc-comment",
+ "docopt",
+ "fixedbitset",
+ "glium",
+ "libc",
+ "num",
+ "once_cell",
+ "rand",
+ "rand_xorshift",
+ "rayon",
+ "regex",
+ "serde",
+ "winapi",
+]
+
+[[package]]
+name = "redox_syscall"
+version = "0.2.16"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a"
+dependencies = [
+ "bitflags",
+]
+
+[[package]]
+name = "regex"
+version = "1.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "48aaa5748ba571fb95cd2c85c09f629215d3a6ece942baa100950af03a34f733"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-syntax",
+]
+
+[[package]]
+name = "regex-syntax"
+version = "0.6.28"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848"
+
+[[package]]
+name = "rustc-demangle"
+version = "0.1.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342"
+
+[[package]]
+name = "safe_arch"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c1ff3d6d9696af502cc3110dacce942840fb06ff4514cad92236ecc455f2ce05"
+dependencies = [
+ "bytemuck",
+]
+
+[[package]]
+name = "scoped-tls"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294"
+
+[[package]]
+name = "scopeguard"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
+
+[[package]]
+name = "sctk-adwaita"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "61270629cc6b4d77ec1907db1033d5c2e1a404c412743621981a871dc9c12339"
+dependencies = [
+ "crossfont",
+ "log",
+ "smithay-client-toolkit",
+ "tiny-skia",
+]
+
+[[package]]
+name = "serde"
+version = "1.0.152"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb"
+dependencies = [
+ "serde_derive",
+]
+
+[[package]]
+name = "serde_derive"
+version = "1.0.152"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "servo-fontconfig"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c7e3e22fe5fd73d04ebf0daa049d3efe3eae55369ce38ab16d07ddd9ac5c217c"
+dependencies = [
+ "libc",
+ "servo-fontconfig-sys",
+]
+
+[[package]]
+name = "servo-fontconfig-sys"
+version = "5.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e36b879db9892dfa40f95da1c38a835d41634b825fbd8c4c418093d53c24b388"
+dependencies = [
+ "expat-sys",
+ "freetype-sys",
+ "pkg-config",
+]
+
+[[package]]
+name = "shared_library"
+version = "0.1.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5a9e7e0f2bfae24d8a5b5a66c5b257a83c7412304311512a0c054cd5e619da11"
+dependencies = [
+ "lazy_static",
+ "libc",
+]
+
+[[package]]
+name = "slotmap"
+version = "1.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e1e08e261d0e8f5c43123b7adf3e4ca1690d655377ac93a03b2c9d3e98de1342"
+dependencies = [
+ "version_check",
+]
+
+[[package]]
+name = "smallvec"
+version = "1.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0"
+
+[[package]]
+name = "smithay-client-toolkit"
+version = "0.16.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f307c47d32d2715eb2e0ece5589057820e0e5e70d07c247d1063e844e107f454"
+dependencies = [
+ "bitflags",
+ "calloop",
+ "dlib",
+ "lazy_static",
+ "log",
+ "memmap2",
+ "nix 0.24.3",
+ "pkg-config",
+ "wayland-client",
+ "wayland-cursor",
+ "wayland-protocols",
+]
+
+[[package]]
+name = "strsim"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
+
+[[package]]
+name = "syn"
+version = "1.0.109"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "takeable-option"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "36ae8932fcfea38b7d3883ae2ab357b0d57a02caaa18ebb4f5ece08beaec4aa0"
+
+[[package]]
+name = "thiserror"
+version = "1.0.38"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6a9cd18aa97d5c45c6603caea1da6628790b37f7a34b6ca89522331c5180fed0"
+dependencies = [
+ "thiserror-impl",
+]
+
+[[package]]
+name = "thiserror-impl"
+version = "1.0.38"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1fb327af4685e4d03fa8cbcf1716380da910eeb2bb8be417e7f9fd3fb164f36f"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "tiny-skia"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "642680569bb895b16e4b9d181c60be1ed136fa0c9c7f11d004daf053ba89bf82"
+dependencies = [
+ "arrayref",
+ "arrayvec",
+ "bytemuck",
+ "cfg-if",
+ "png",
+ "safe_arch",
+ "tiny-skia-path",
+]
+
+[[package]]
+name = "tiny-skia-path"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c114d32f0c2ee43d585367cb013dfaba967ab9f62b90d9af0d696e955e70fa6c"
+dependencies = [
+ "arrayref",
+ "bytemuck",
+]
+
+[[package]]
+name = "toml"
+version = "0.5.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234"
+dependencies = [
+ "serde",
+]
+
+[[package]]
+name = "unicode-ident"
+version = "1.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "84a22b9f218b40614adcb3f4ff08b703773ad44fa9423e4e0d346d5db86e4ebc"
+
+[[package]]
+name = "vec_map"
+version = "0.8.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191"
+
+[[package]]
+name = "version_check"
+version = "0.9.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
+
+[[package]]
+name = "wasi"
+version = "0.11.0+wasi-snapshot-preview1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
+
+[[package]]
+name = "wasm-bindgen"
+version = "0.2.84"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "31f8dcbc21f30d9b8f2ea926ecb58f6b91192c17e9d33594b3df58b2007ca53b"
+dependencies = [
+ "cfg-if",
+ "wasm-bindgen-macro",
+]
+
+[[package]]
+name = "wasm-bindgen-backend"
+version = "0.2.84"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "95ce90fd5bcc06af55a641a86428ee4229e44e07033963a2290a8e241607ccb9"
+dependencies = [
+ "bumpalo",
+ "log",
+ "once_cell",
+ "proc-macro2",
+ "quote",
+ "syn",
+ "wasm-bindgen-shared",
+]
+
+[[package]]
+name = "wasm-bindgen-macro"
+version = "0.2.84"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4c21f77c0bedc37fd5dc21f897894a5ca01e7bb159884559461862ae90c0b4c5"
+dependencies = [
+ "quote",
+ "wasm-bindgen-macro-support",
+]
+
+[[package]]
+name = "wasm-bindgen-macro-support"
+version = "0.2.84"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+ "wasm-bindgen-backend",
+ "wasm-bindgen-shared",
+]
+
+[[package]]
+name = "wasm-bindgen-shared"
+version = "0.2.84"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0046fef7e28c3804e5e38bfa31ea2a0f73905319b677e57ebe37e49358989b5d"
+
+[[package]]
+name = "wayland-client"
+version = "0.29.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3f3b068c05a039c9f755f881dc50f01732214f5685e379829759088967c46715"
+dependencies = [
+ "bitflags",
+ "downcast-rs",
+ "libc",
+ "nix 0.24.3",
+ "scoped-tls",
+ "wayland-commons",
+ "wayland-scanner",
+ "wayland-sys",
+]
+
+[[package]]
+name = "wayland-commons"
+version = "0.29.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8691f134d584a33a6606d9d717b95c4fa20065605f798a3f350d78dced02a902"
+dependencies = [
+ "nix 0.24.3",
+ "once_cell",
+ "smallvec",
+ "wayland-sys",
+]
+
+[[package]]
+name = "wayland-cursor"
+version = "0.29.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6865c6b66f13d6257bef1cd40cbfe8ef2f150fb8ebbdb1e8e873455931377661"
+dependencies = [
+ "nix 0.24.3",
+ "wayland-client",
+ "xcursor",
+]
+
+[[package]]
+name = "wayland-egl"
+version = "0.29.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "402de949f81a012926d821a2d659f930694257e76dd92b6e0042ceb27be4107d"
+dependencies = [
+ "wayland-client",
+ "wayland-sys",
+]
+
+[[package]]
+name = "wayland-protocols"
+version = "0.29.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b950621f9354b322ee817a23474e479b34be96c2e909c14f7bc0100e9a970bc6"
+dependencies = [
+ "bitflags",
+ "wayland-client",
+ "wayland-commons",
+ "wayland-scanner",
+]
+
+[[package]]
+name = "wayland-scanner"
+version = "0.29.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8f4303d8fa22ab852f789e75a967f0a2cdc430a607751c0499bada3e451cbd53"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "xml-rs",
+]
+
+[[package]]
+name = "wayland-sys"
+version = "0.29.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "be12ce1a3c39ec7dba25594b97b42cb3195d54953ddb9d3d95a7c3902bc6e9d4"
+dependencies = [
+ "dlib",
+ "lazy_static",
+ "pkg-config",
+]
+
+[[package]]
+name = "web-sys"
+version = "0.3.61"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e33b99f4b23ba3eec1a53ac264e35a755f00e966e0065077d6027c0f575b0b97"
+dependencies = [
+ "js-sys",
+ "wasm-bindgen",
+]
+
+[[package]]
+name = "winapi"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
+dependencies = [
+ "winapi-i686-pc-windows-gnu",
+ "winapi-x86_64-pc-windows-gnu",
+]
+
+[[package]]
+name = "winapi-i686-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
+
+[[package]]
+name = "winapi-x86_64-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
+
+[[package]]
+name = "windows-sys"
+version = "0.36.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2"
+dependencies = [
+ "windows_aarch64_msvc 0.36.1",
+ "windows_i686_gnu 0.36.1",
+ "windows_i686_msvc 0.36.1",
+ "windows_x86_64_gnu 0.36.1",
+ "windows_x86_64_msvc 0.36.1",
+]
+
+[[package]]
+name = "windows-sys"
+version = "0.45.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0"
+dependencies = [
+ "windows-targets",
+]
+
+[[package]]
+name = "windows-targets"
+version = "0.42.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e2522491fbfcd58cc84d47aeb2958948c4b8982e9a2d8a2a35bbaed431390e7"
+dependencies = [
+ "windows_aarch64_gnullvm",
+ "windows_aarch64_msvc 0.42.1",
+ "windows_i686_gnu 0.42.1",
+ "windows_i686_msvc 0.42.1",
+ "windows_x86_64_gnu 0.42.1",
+ "windows_x86_64_gnullvm",
+ "windows_x86_64_msvc 0.42.1",
+]
+
+[[package]]
+name = "windows_aarch64_gnullvm"
+version = "0.42.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8c9864e83243fdec7fc9c5444389dcbbfd258f745e7853198f365e3c4968a608"
+
+[[package]]
+name = "windows_aarch64_msvc"
+version = "0.36.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47"
+
+[[package]]
+name = "windows_aarch64_msvc"
+version = "0.42.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4c8b1b673ffc16c47a9ff48570a9d85e25d265735c503681332589af6253c6c7"
+
+[[package]]
+name = "windows_i686_gnu"
+version = "0.36.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6"
+
+[[package]]
+name = "windows_i686_gnu"
+version = "0.42.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "de3887528ad530ba7bdbb1faa8275ec7a1155a45ffa57c37993960277145d640"
+
+[[package]]
+name = "windows_i686_msvc"
+version = "0.36.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024"
+
+[[package]]
+name = "windows_i686_msvc"
+version = "0.42.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bf4d1122317eddd6ff351aa852118a2418ad4214e6613a50e0191f7004372605"
+
+[[package]]
+name = "windows_x86_64_gnu"
+version = "0.36.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1"
+
+[[package]]
+name = "windows_x86_64_gnu"
+version = "0.42.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c1040f221285e17ebccbc2591ffdc2d44ee1f9186324dd3e84e99ac68d699c45"
+
+[[package]]
+name = "windows_x86_64_gnullvm"
+version = "0.42.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "628bfdf232daa22b0d64fdb62b09fcc36bb01f05a3939e20ab73aaf9470d0463"
+
+[[package]]
+name = "windows_x86_64_msvc"
+version = "0.36.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680"
+
+[[package]]
+name = "windows_x86_64_msvc"
+version = "0.42.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "447660ad36a13288b1db4d4248e857b510e8c3a225c822ba4fb748c0aafecffd"
+
+[[package]]
+name = "winit"
+version = "0.27.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bb796d6fbd86b2fd896c9471e6f04d39d750076ebe5680a3958f00f5ab97657c"
+dependencies = [
+ "bitflags",
+ "cocoa",
+ "core-foundation",
+ "core-graphics",
+ "dispatch",
+ "instant",
+ "libc",
+ "log",
+ "mio",
+ "ndk",
+ "ndk-glue",
+ "objc",
+ "once_cell",
+ "parking_lot",
+ "percent-encoding",
+ "raw-window-handle 0.4.3",
+ "raw-window-handle 0.5.0",
+ "sctk-adwaita",
+ "smithay-client-toolkit",
+ "wasm-bindgen",
+ "wayland-client",
+ "wayland-protocols",
+ "web-sys",
+ "windows-sys 0.36.1",
+ "x11-dl",
+]
+
+[[package]]
+name = "wio"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5d129932f4644ac2396cb456385cbf9e63b5b30c6e8dc4820bdca4eb082037a5"
+dependencies = [
+ "winapi",
+]
+
+[[package]]
+name = "x11-dl"
+version = "2.21.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "38735924fedd5314a6e548792904ed8c6de6636285cb9fec04d5b1db85c1516f"
+dependencies = [
+ "libc",
+ "once_cell",
+ "pkg-config",
+]
+
+[[package]]
+name = "xcursor"
+version = "0.3.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "463705a63313cd4301184381c5e8042f0a7e9b4bb63653f216311d4ae74690b7"
+dependencies = [
+ "nom",
+]
+
+[[package]]
+name = "xml-rs"
+version = "0.8.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d2d7d3948613f75c98fd9328cfdcc45acc4d360655289d0a7d4ec931392200a3"
diff --git a/rayon/ci/highlander.sh b/rayon/ci/highlander.sh
new file mode 100755
index 0000000..9585c45
--- /dev/null
+++ b/rayon/ci/highlander.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+
+echo "INFO: There Can Be Only One!" >&2
+
+if cargo build --manifest-path "$DIR/highlander/Cargo.toml"; then
+    echo "ERROR: we built with multiple rayon-core!" >&2
+    exit 1
+fi
+
+echo "PASS: using multiple rayon-core failed." >&2
diff --git a/rayon/ci/highlander/Cargo.toml b/rayon/ci/highlander/Cargo.toml
new file mode 100644
index 0000000..74f2331
--- /dev/null
+++ b/rayon/ci/highlander/Cargo.toml
@@ -0,0 +1,17 @@
+[package]
+authors = ["Josh Stone <cu...@gmail.com>"]
+edition = "2018"
+name = "highlander"
+description = "There Can Be Only One"
+version = "0.0.0"
+publish = false
+
+[dependencies]
+
+[dependencies.alt-core]
+optional = false
+path = "../alt-core"
+
+[dependencies.rayon-core]
+optional = false
+path = "../../rayon-core"
diff --git a/rayon/ci/highlander/src/main.rs b/rayon/ci/highlander/src/main.rs
new file mode 100644
index 0000000..f328e4d
--- /dev/null
+++ b/rayon/ci/highlander/src/main.rs
@@ -0,0 +1 @@
+fn main() {}
diff --git a/rayon/rayon-core/Cargo.toml b/rayon/rayon-core/Cargo.toml
new file mode 100644
index 0000000..920ffe5
--- /dev/null
+++ b/rayon/rayon-core/Cargo.toml
@@ -0,0 +1,57 @@
+[package]
+name = "rayon-core"
+version = "1.11.0"
+authors = ["Niko Matsakis <ni...@alum.mit.edu>",
+           "Josh Stone <cu...@gmail.com>"]
+description = "Core APIs for Rayon"
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/rayon-rs/rayon"
+documentation = "https://docs.rs/rayon/"
+rust-version = "1.59"
+edition = "2021"
+links = "rayon-core"
+build = "build.rs"
+readme = "README.md"
+keywords = ["parallel", "thread", "concurrency", "join", "performance"]
+categories = ["concurrency"]
+
+# Some dependencies may not be their latest version, in order to support older rustc.
+[dependencies]
+num_cpus = "1.2"
+crossbeam-channel = "0.5.0"
+crossbeam-deque = "0.8.1"
+crossbeam-utils = "0.8.0"
+
+[dev-dependencies]
+rand = "0.8"
+rand_xorshift = "0.3"
+scoped-tls = "1.0"
+
+[target.'cfg(unix)'.dev-dependencies]
+libc = "0.2"
+
+[[test]]
+name = "stack_overflow_crash"
+path = "tests/stack_overflow_crash.rs"
+
+# NB: having one [[test]] manually defined means we need to declare them all
+
+[[test]]
+name = "double_init_fail"
+path = "tests/double_init_fail.rs"
+
+[[test]]
+name = "init_zero_threads"
+path = "tests/init_zero_threads.rs"
+
+[[test]]
+name = "scope_join"
+path = "tests/scope_join.rs"
+
+[[test]]
+name = "simple_panic"
+path = "tests/simple_panic.rs"
+
+[[test]]
+name = "scoped_threadpool"
+path = "tests/scoped_threadpool.rs"
diff --git a/rayon/rayon-core/LICENSE-APACHE b/rayon/rayon-core/LICENSE-APACHE
new file mode 100644
index 0000000..16fe87b
--- /dev/null
+++ b/rayon/rayon-core/LICENSE-APACHE
@@ -0,0 +1,201 @@
+                              Apache License
+                        Version 2.0, January 2004
+                     http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+   "License" shall mean the terms and conditions for use, reproduction,
+   and distribution as defined by Sections 1 through 9 of this document.
+
+   "Licensor" shall mean the copyright owner or entity authorized by
+   the copyright owner that is granting the License.
+
+   "Legal Entity" shall mean the union of the acting entity and all
+   other entities that control, are controlled by, or are under common
+   control with that entity. For the purposes of this definition,
+   "control" means (i) the power, direct or indirect, to cause the
+   direction or management of such entity, whether by contract or
+   otherwise, or (ii) ownership of fifty percent (50%) or more of the
+   outstanding shares, or (iii) beneficial ownership of such entity.
+
+   "You" (or "Your") shall mean an individual or Legal Entity
+   exercising permissions granted by this License.
+
+   "Source" form shall mean the preferred form for making modifications,
+   including but not limited to software source code, documentation
+   source, and configuration files.
+
+   "Object" form shall mean any form resulting from mechanical
+   transformation or translation of a Source form, including but
+   not limited to compiled object code, generated documentation,
+   and conversions to other media types.
+
+   "Work" shall mean the work of authorship, whether in Source or
+   Object form, made available under the License, as indicated by a
+   copyright notice that is included in or attached to the work
+   (an example is provided in the Appendix below).
+
+   "Derivative Works" shall mean any work, whether in Source or Object
+   form, that is based on (or derived from) the Work and for which the
+   editorial revisions, annotations, elaborations, or other modifications
+   represent, as a whole, an original work of authorship. For the purposes
+   of this License, Derivative Works shall not include works that remain
+   separable from, or merely link (or bind by name) to the interfaces of,
+   the Work and Derivative Works thereof.
+
+   "Contribution" shall mean any work of authorship, including
+   the original version of the Work and any modifications or additions
+   to that Work or Derivative Works thereof, that is intentionally
+   submitted to Licensor for inclusion in the Work by the copyright owner
+   or by an individual or Legal Entity authorized to submit on behalf of
+   the copyright owner. For the purposes of this definition, "submitted"
+   means any form of electronic, verbal, or written communication sent
+   to the Licensor or its representatives, including but not limited to
+   communication on electronic mailing lists, source code control systems,
+   and issue tracking systems that are managed by, or on behalf of, the
+   Licensor for the purpose of discussing and improving the Work, but
+   excluding communication that is conspicuously marked or otherwise
+   designated in writing by the copyright owner as "Not a Contribution."
+
+   "Contributor" shall mean Licensor and any individual or Legal Entity
+   on behalf of whom a Contribution has been received by Licensor and
+   subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   copyright license to reproduce, prepare Derivative Works of,
+   publicly display, publicly perform, sublicense, and distribute the
+   Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   (except as stated in this section) patent license to make, have made,
+   use, offer to sell, sell, import, and otherwise transfer the Work,
+   where such license applies only to those patent claims licensable
+   by such Contributor that are necessarily infringed by their
+   Contribution(s) alone or by combination of their Contribution(s)
+   with the Work to which such Contribution(s) was submitted. If You
+   institute patent litigation against any entity (including a
+   cross-claim or counterclaim in a lawsuit) alleging that the Work
+   or a Contribution incorporated within the Work constitutes direct
+   or contributory patent infringement, then any patent licenses
+   granted to You under this License for that Work shall terminate
+   as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+   Work or Derivative Works thereof in any medium, with or without
+   modifications, and in Source or Object form, provided that You
+   meet the following conditions:
+
+   (a) You must give any other recipients of the Work or
+       Derivative Works a copy of this License; and
+
+   (b) You must cause any modified files to carry prominent notices
+       stating that You changed the files; and
+
+   (c) You must retain, in the Source form of any Derivative Works
+       that You distribute, all copyright, patent, trademark, and
+       attribution notices from the Source form of the Work,
+       excluding those notices that do not pertain to any part of
+       the Derivative Works; and
+
+   (d) If the Work includes a "NOTICE" text file as part of its
+       distribution, then any Derivative Works that You distribute must
+       include a readable copy of the attribution notices contained
+       within such NOTICE file, excluding those notices that do not
+       pertain to any part of the Derivative Works, in at least one
+       of the following places: within a NOTICE text file distributed
+       as part of the Derivative Works; within the Source form or
+       documentation, if provided along with the Derivative Works; or,
+       within a display generated by the Derivative Works, if and
+       wherever such third-party notices normally appear. The contents
+       of the NOTICE file are for informational purposes only and
+       do not modify the License. You may add Your own attribution
+       notices within Derivative Works that You distribute, alongside
+       or as an addendum to the NOTICE text from the Work, provided
+       that such additional attribution notices cannot be construed
+       as modifying the License.
+
+   You may add Your own copyright statement to Your modifications and
+   may provide additional or different license terms and conditions
+   for use, reproduction, or distribution of Your modifications, or
+   for any such Derivative Works as a whole, provided Your use,
+   reproduction, and distribution of the Work otherwise complies with
+   the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+   any Contribution intentionally submitted for inclusion in the Work
+   by You to the Licensor shall be under the terms and conditions of
+   this License, without any additional terms or conditions.
+   Notwithstanding the above, nothing herein shall supersede or modify
+   the terms of any separate license agreement you may have executed
+   with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+   names, trademarks, service marks, or product names of the Licensor,
+   except as required for reasonable and customary use in describing the
+   origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+   agreed to in writing, Licensor provides the Work (and each
+   Contributor provides its Contributions) on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+   implied, including, without limitation, any warranties or conditions
+   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+   PARTICULAR PURPOSE. You are solely responsible for determining the
+   appropriateness of using or redistributing the Work and assume any
+   risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+   whether in tort (including negligence), contract, or otherwise,
+   unless required by applicable law (such as deliberate and grossly
+   negligent acts) or agreed to in writing, shall any Contributor be
+   liable to You for damages, including any direct, indirect, special,
+   incidental, or consequential damages of any character arising as a
+   result of this License or out of the use or inability to use the
+   Work (including but not limited to damages for loss of goodwill,
+   work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses), even if such Contributor
+   has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+   the Work or Derivative Works thereof, You may choose to offer,
+   and charge a fee for, acceptance of support, warranty, indemnity,
+   or other liability obligations and/or rights consistent with this
+   License. However, in accepting such obligations, You may act only
+   on Your own behalf and on Your sole responsibility, not on behalf
+   of any other Contributor, and only if You agree to indemnify,
+   defend, and hold each Contributor harmless for any liability
+   incurred by, or claims asserted against, such Contributor by reason
+   of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+   To apply the Apache License to your work, attach the following
+   boilerplate notice, with the fields enclosed by brackets "[]"
+   replaced with your own identifying information. (Don't include
+   the brackets!)  The text should be enclosed in the appropriate
+   comment syntax for the file format. We also recommend that a
+   file or class name and description of purpose be included on the
+   same "printed page" as the copyright notice for easier
+   identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/rayon/rayon-core/LICENSE-MIT b/rayon/rayon-core/LICENSE-MIT
new file mode 100644
index 0000000..25597d5
--- /dev/null
+++ b/rayon/rayon-core/LICENSE-MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2010 The Rust Project Developers
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/rayon/rayon-core/README.md b/rayon/rayon-core/README.md
new file mode 100644
index 0000000..448901b
--- /dev/null
+++ b/rayon/rayon-core/README.md
@@ -0,0 +1,11 @@
+Rayon-core represents the "core, stable" APIs of Rayon: join, scope, and so forth, as well as the ability to create custom thread-pools with ThreadPool.
+
+Maybe worth mentioning: users are not necessarily intended to directly access rayon-core; all its APIs are mirrored in the rayon crate. To that end, the examples in the docs use rayon::join and so forth rather than rayon_core::join.
+
+rayon-core aims to never, or almost never, have a breaking change to its API, because each revision of rayon-core also houses the global thread-pool (and hence if you have two simultaneous versions of rayon-core, you have two thread-pools).
+
+Please see [Rayon Docs] for details about using Rayon.
+
+[Rayon Docs]: https://docs.rs/rayon/
+
+Rayon-core currently requires `rustc 1.59.0` or greater.
diff --git a/rayon/rayon-core/build.rs b/rayon/rayon-core/build.rs
new file mode 100644
index 0000000..8771b63
--- /dev/null
+++ b/rayon/rayon-core/build.rs
@@ -0,0 +1,7 @@
+// We need a build script to use `link = "rayon-core"`.  But we're not
+// *actually* linking to anything, just making sure that we're the only
+// rayon-core in use.
+fn main() {
+    // we don't need to rebuild for anything else
+    println!("cargo:rerun-if-changed=build.rs");
+}
diff --git a/rayon/rayon-core/src/broadcast/mod.rs b/rayon/rayon-core/src/broadcast/mod.rs
new file mode 100644
index 0000000..d991c54
--- /dev/null
+++ b/rayon/rayon-core/src/broadcast/mod.rs
@@ -0,0 +1,151 @@
+use crate::job::{ArcJob, StackJob};
+use crate::latch::LatchRef;
+use crate::registry::{Registry, WorkerThread};
+use crate::scope::ScopeLatch;
+use std::fmt;
+use std::marker::PhantomData;
+use std::sync::Arc;
+
+mod test;
+
+/// Executes `op` within every thread in the current threadpool. If this is
+/// called from a non-Rayon thread, it will execute in the global threadpool.
+/// Any attempts to use `join`, `scope`, or parallel iterators will then operate
+/// within that threadpool. When the call has completed on each thread, returns
+/// a vector containing all of their return values.
+///
+/// For more information, see the [`ThreadPool::broadcast()`][m] method.
+///
+/// [m]: struct.ThreadPool.html#method.broadcast
+pub fn broadcast<OP, R>(op: OP) -> Vec<R>
+where
+    OP: Fn(BroadcastContext<'_>) -> R + Sync,
+    R: Send,
+{
+    // We assert that current registry has not terminated.
+    unsafe { broadcast_in(op, &Registry::current()) }
+}
+
+/// Spawns an asynchronous task on every thread in this thread-pool. This task
+/// will run in the implicit, global scope, which means that it may outlast the
+/// current stack frame -- therefore, it cannot capture any references onto the
+/// stack (you will likely need a `move` closure).
+///
+/// For more information, see the [`ThreadPool::spawn_broadcast()`][m] method.
+///
+/// [m]: struct.ThreadPool.html#method.spawn_broadcast
+pub fn spawn_broadcast<OP>(op: OP)
+where
+    OP: Fn(BroadcastContext<'_>) + Send + Sync + 'static,
+{
+    // We assert that current registry has not terminated.
+    unsafe { spawn_broadcast_in(op, &Registry::current()) }
+}
+
+/// Provides context to a closure called by `broadcast`.
+pub struct BroadcastContext<'a> {
+    worker: &'a WorkerThread,
+
+    /// Make sure to prevent auto-traits like `Send` and `Sync`.
+    _marker: PhantomData<&'a mut dyn Fn()>,
+}
+
+impl<'a> BroadcastContext<'a> {
+    pub(super) fn with<R>(f: impl FnOnce(BroadcastContext<'_>) -> R) -> R {
+        let worker_thread = WorkerThread::current();
+        assert!(!worker_thread.is_null());
+        f(BroadcastContext {
+            worker: unsafe { &*worker_thread },
+            _marker: PhantomData,
+        })
+    }
+
+    /// Our index amongst the broadcast threads (ranges from `0..self.num_threads()`).
+    #[inline]
+    pub fn index(&self) -> usize {
+        self.worker.index()
+    }
+
+    /// The number of threads receiving the broadcast in the thread pool.
+    ///
+    /// # Future compatibility note
+    ///
+    /// Future versions of Rayon might vary the number of threads over time, but
+    /// this method will always return the number of threads which are actually
+    /// receiving your particular `broadcast` call.
+    #[inline]
+    pub fn num_threads(&self) -> usize {
+        self.worker.registry().num_threads()
+    }
+}
+
+impl<'a> fmt::Debug for BroadcastContext<'a> {
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt.debug_struct("BroadcastContext")
+            .field("index", &self.index())
+            .field("num_threads", &self.num_threads())
+            .field("pool_id", &self.worker.registry().id())
+            .finish()
+    }
+}
+
+/// Execute `op` on every thread in the pool. It will be executed on each
+/// thread when they have nothing else to do locally, before they try to
+/// steal work from other threads. This function will not return until all
+/// threads have completed the `op`.
+///
+/// Unsafe because `registry` must not yet have terminated.
+pub(super) unsafe fn broadcast_in<OP, R>(op: OP, registry: &Arc<Registry>) -> Vec<R>
+where
+    OP: Fn(BroadcastContext<'_>) -> R + Sync,
+    R: Send,
+{
+    let f = move |injected: bool| {
+        debug_assert!(injected);
+        BroadcastContext::with(&op)
+    };
+
+    let n_threads = registry.num_threads();
+    let current_thread = WorkerThread::current().as_ref();
+    let latch = ScopeLatch::with_count(n_threads, current_thread);
+    let jobs: Vec<_> = (0..n_threads)
+        .map(|_| StackJob::new(&f, LatchRef::new(&latch)))
+        .collect();
+    let job_refs = jobs.iter().map(|job| job.as_job_ref());
+
+    registry.inject_broadcast(job_refs);
+
+    // Wait for all jobs to complete, then collect the results, maybe propagating a panic.
+    latch.wait(current_thread);
+    jobs.into_iter().map(|job| job.into_result()).collect()
+}
+
+/// Execute `op` on every thread in the pool. It will be executed on each
+/// thread when they have nothing else to do locally, before they try to
+/// steal work from other threads. This function returns immediately after
+/// injecting the jobs.
+///
+/// Unsafe because `registry` must not yet have terminated.
+pub(super) unsafe fn spawn_broadcast_in<OP>(op: OP, registry: &Arc<Registry>)
+where
+    OP: Fn(BroadcastContext<'_>) + Send + Sync + 'static,
+{
+    let job = ArcJob::new({
+        let registry = Arc::clone(registry);
+        move || {
+            registry.catch_unwind(|| BroadcastContext::with(&op));
+            registry.terminate(); // (*) permit registry to terminate now
+        }
+    });
+
+    let n_threads = registry.num_threads();
+    let job_refs = (0..n_threads).map(|_| {
+        // Ensure that registry cannot terminate until this job has executed
+        // on each thread. This ref is decremented at the (*) above.
+        registry.increment_terminate_count();
+
+        ArcJob::as_static_job_ref(&job)
+    });
+
+    registry.inject_broadcast(job_refs);
+}
diff --git a/rayon/rayon-core/src/broadcast/test.rs b/rayon/rayon-core/src/broadcast/test.rs
new file mode 100644
index 0000000..3ae11f7
--- /dev/null
+++ b/rayon/rayon-core/src/broadcast/test.rs
@@ -0,0 +1,262 @@
+#![cfg(test)]
+
+use crate::ThreadPoolBuilder;
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::sync::Arc;
+use std::{thread, time};
+
+#[test]
+fn broadcast_global() {
+    let v = crate::broadcast(|ctx| ctx.index());
+    assert!(v.into_iter().eq(0..crate::current_num_threads()));
+}
+
+#[test]
+#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
+fn spawn_broadcast_global() {
+    let (tx, rx) = crossbeam_channel::unbounded();
+    crate::spawn_broadcast(move |ctx| tx.send(ctx.index()).unwrap());
+
+    let mut v: Vec<_> = rx.into_iter().collect();
+    v.sort_unstable();
+    assert!(v.into_iter().eq(0..crate::current_num_threads()));
+}
+
+#[test]
+#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
+fn broadcast_pool() {
+    let pool = ThreadPoolBuilder::new().num_threads(7).build().unwrap();
+    let v = pool.broadcast(|ctx| ctx.index());
+    assert!(v.into_iter().eq(0..7));
+}
+
+#[test]
+#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
+fn spawn_broadcast_pool() {
+    let (tx, rx) = crossbeam_channel::unbounded();
+    let pool = ThreadPoolBuilder::new().num_threads(7).build().unwrap();
+    pool.spawn_broadcast(move |ctx| tx.send(ctx.index()).unwrap());
+
+    let mut v: Vec<_> = rx.into_iter().collect();
+    v.sort_unstable();
+    assert!(v.into_iter().eq(0..7));
+}
+
+#[test]
+#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
+fn broadcast_self() {
+    let pool = ThreadPoolBuilder::new().num_threads(7).build().unwrap();
+    let v = pool.install(|| crate::broadcast(|ctx| ctx.index()));
+    assert!(v.into_iter().eq(0..7));
+}
+
+#[test]
+#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
+fn spawn_broadcast_self() {
+    let (tx, rx) = crossbeam_channel::unbounded();
+    let pool = ThreadPoolBuilder::new().num_threads(7).build().unwrap();
+    pool.spawn(|| crate::spawn_broadcast(move |ctx| tx.send(ctx.index()).unwrap()));
+
+    let mut v: Vec<_> = rx.into_iter().collect();
+    v.sort_unstable();
+    assert!(v.into_iter().eq(0..7));
+}
+
+#[test]
+#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
+fn broadcast_mutual() {
+    let count = AtomicUsize::new(0);
+    let pool1 = ThreadPoolBuilder::new().num_threads(3).build().unwrap();
+    let pool2 = ThreadPoolBuilder::new().num_threads(7).build().unwrap();
+    pool1.install(|| {
+        pool2.broadcast(|_| {
+            pool1.broadcast(|_| {
+                count.fetch_add(1, Ordering::Relaxed);
+            })
+        })
+    });
+    assert_eq!(count.into_inner(), 3 * 7);
+}
+
+#[test]
+#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
+fn spawn_broadcast_mutual() {
+    let (tx, rx) = crossbeam_channel::unbounded();
+    let pool1 = Arc::new(ThreadPoolBuilder::new().num_threads(3).build().unwrap());
+    let pool2 = ThreadPoolBuilder::new().num_threads(7).build().unwrap();
+    pool1.spawn({
+        let pool1 = Arc::clone(&pool1);
+        move || {
+            pool2.spawn_broadcast(move |_| {
+                let tx = tx.clone();
+                pool1.spawn_broadcast(move |_| tx.send(()).unwrap())
+            })
+        }
+    });
+    assert_eq!(rx.into_iter().count(), 3 * 7);
+}
+
+#[test]
+#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
+fn broadcast_mutual_sleepy() {
+    let count = AtomicUsize::new(0);
+    let pool1 = ThreadPoolBuilder::new().num_threads(3).build().unwrap();
+    let pool2 = ThreadPoolBuilder::new().num_threads(7).build().unwrap();
+    pool1.install(|| {
+        thread::sleep(time::Duration::from_secs(1));
+        pool2.broadcast(|_| {
+            thread::sleep(time::Duration::from_secs(1));
+            pool1.broadcast(|_| {
+                thread::sleep(time::Duration::from_millis(100));
+                count.fetch_add(1, Ordering::Relaxed);
+            })
+        })
+    });
+    assert_eq!(count.into_inner(), 3 * 7);
+}
+
+#[test]
+#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
+fn spawn_broadcast_mutual_sleepy() {
+    let (tx, rx) = crossbeam_channel::unbounded();
+    let pool1 = Arc::new(ThreadPoolBuilder::new().num_threads(3).build().unwrap());
+    let pool2 = ThreadPoolBuilder::new().num_threads(7).build().unwrap();
+    pool1.spawn({
+        let pool1 = Arc::clone(&pool1);
+        move || {
+            thread::sleep(time::Duration::from_secs(1));
+            pool2.spawn_broadcast(move |_| {
+                let tx = tx.clone();
+                thread::sleep(time::Duration::from_secs(1));
+                pool1.spawn_broadcast(move |_| {
+                    thread::sleep(time::Duration::from_millis(100));
+                    tx.send(()).unwrap();
+                })
+            })
+        }
+    });
+    assert_eq!(rx.into_iter().count(), 3 * 7);
+}
+
+#[test]
+#[cfg_attr(not(panic = "unwind"), ignore)]
+fn broadcast_panic_one() {
+    let count = AtomicUsize::new(0);
+    let pool = ThreadPoolBuilder::new().num_threads(7).build().unwrap();
+    let result = crate::unwind::halt_unwinding(|| {
+        pool.broadcast(|ctx| {
+            count.fetch_add(1, Ordering::Relaxed);
+            if ctx.index() == 3 {
+                panic!("Hello, world!");
+            }
+        })
+    });
+    assert_eq!(count.into_inner(), 7);
+    assert!(result.is_err(), "broadcast panic should propagate!");
+}
+
+#[test]
+#[cfg_attr(not(panic = "unwind"), ignore)]
+fn spawn_broadcast_panic_one() {
+    let (tx, rx) = crossbeam_channel::unbounded();
+    let (panic_tx, panic_rx) = crossbeam_channel::unbounded();
+    let pool = ThreadPoolBuilder::new()
+        .num_threads(7)
+        .panic_handler(move |e| panic_tx.send(e).unwrap())
+        .build()
+        .unwrap();
+    pool.spawn_broadcast(move |ctx| {
+        tx.send(()).unwrap();
+        if ctx.index() == 3 {
+            panic!("Hello, world!");
+        }
+    });
+    drop(pool); // including panic_tx
+    assert_eq!(rx.into_iter().count(), 7);
+    assert_eq!(panic_rx.into_iter().count(), 1);
+}
+
+#[test]
+#[cfg_attr(not(panic = "unwind"), ignore)]
+fn broadcast_panic_many() {
+    let count = AtomicUsize::new(0);
+    let pool = ThreadPoolBuilder::new().num_threads(7).build().unwrap();
+    let result = crate::unwind::halt_unwinding(|| {
+        pool.broadcast(|ctx| {
+            count.fetch_add(1, Ordering::Relaxed);
+            if ctx.index() % 2 == 0 {
+                panic!("Hello, world!");
+            }
+        })
+    });
+    assert_eq!(count.into_inner(), 7);
+    assert!(result.is_err(), "broadcast panic should propagate!");
+}
+
+#[test]
+#[cfg_attr(not(panic = "unwind"), ignore)]
+fn spawn_broadcast_panic_many() {
+    let (tx, rx) = crossbeam_channel::unbounded();
+    let (panic_tx, panic_rx) = crossbeam_channel::unbounded();
+    let pool = ThreadPoolBuilder::new()
+        .num_threads(7)
+        .panic_handler(move |e| panic_tx.send(e).unwrap())
+        .build()
+        .unwrap();
+    pool.spawn_broadcast(move |ctx| {
+        tx.send(()).unwrap();
+        if ctx.index() % 2 == 0 {
+            panic!("Hello, world!");
+        }
+    });
+    drop(pool); // including panic_tx
+    assert_eq!(rx.into_iter().count(), 7);
+    assert_eq!(panic_rx.into_iter().count(), 4);
+}
+
+#[test]
+#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
+fn broadcast_sleep_race() {
+    let test_duration = time::Duration::from_secs(1);
+    let pool = ThreadPoolBuilder::new().num_threads(7).build().unwrap();
+    let start = time::Instant::now();
+    while start.elapsed() < test_duration {
+        pool.broadcast(|ctx| {
+            // A slight spread of sleep duration increases the chance that one
+            // of the threads will race in the pool's idle sleep afterward.
+            thread::sleep(time::Duration::from_micros(ctx.index() as u64));
+        });
+    }
+}
+
+#[test]
+fn broadcast_after_spawn_broadcast() {
+    let (tx, rx) = crossbeam_channel::unbounded();
+
+    // Queue a non-blocking spawn_broadcast.
+    crate::spawn_broadcast(move |ctx| tx.send(ctx.index()).unwrap());
+
+    // This blocking broadcast runs after all prior broadcasts.
+    crate::broadcast(|_| {});
+
+    // The spawn_broadcast **must** have run by now on all threads.
+    let mut v: Vec<_> = rx.try_iter().collect();
+    v.sort_unstable();
+    assert!(v.into_iter().eq(0..crate::current_num_threads()));
+}
+
+#[test]
+fn broadcast_after_spawn() {
+    let (tx, rx) = crossbeam_channel::bounded(1);
+
+    // Queue a regular spawn on a thread-local deque.
+    crate::registry::in_worker(move |_, _| {
+        crate::spawn(move || tx.send(22).unwrap());
+    });
+
+    // Broadcast runs after the local deque is empty.
+    crate::broadcast(|_| {});
+
+    // The spawn **must** have run by now.
+    assert_eq!(22, rx.try_recv().unwrap());
+}
diff --git a/rayon/rayon-core/src/compile_fail/mod.rs b/rayon/rayon-core/src/compile_fail/mod.rs
new file mode 100644
index 0000000..f2ec646
--- /dev/null
+++ b/rayon/rayon-core/src/compile_fail/mod.rs
@@ -0,0 +1,7 @@
+// These modules contain `compile_fail` doc tests.
+mod quicksort_race1;
+mod quicksort_race2;
+mod quicksort_race3;
+mod rc_return;
+mod rc_upvar;
+mod scope_join_bad;
diff --git a/rayon/rayon-core/src/compile_fail/quicksort_race1.rs b/rayon/rayon-core/src/compile_fail/quicksort_race1.rs
new file mode 100644
index 0000000..5615033
--- /dev/null
+++ b/rayon/rayon-core/src/compile_fail/quicksort_race1.rs
@@ -0,0 +1,28 @@
+/*! ```compile_fail,E0524
+
+fn quick_sort<T:PartialOrd+Send>(v: &mut [T]) {
+    if v.len() <= 1 {
+        return;
+    }
+
+    let mid = partition(v);
+    let (lo, _hi) = v.split_at_mut(mid);
+    rayon_core::join(|| quick_sort(lo), || quick_sort(lo)); //~ ERROR
+}
+
+fn partition<T:PartialOrd+Send>(v: &mut [T]) -> usize {
+    let pivot = v.len() - 1;
+    let mut i = 0;
+    for j in 0..pivot {
+        if v[j] <= v[pivot] {
+            v.swap(i, j);
+            i += 1;
+        }
+    }
+    v.swap(i, pivot);
+    i
+}
+
+fn main() { }
+
+``` */
diff --git a/rayon/rayon-core/src/compile_fail/quicksort_race2.rs b/rayon/rayon-core/src/compile_fail/quicksort_race2.rs
new file mode 100644
index 0000000..020589c
--- /dev/null
+++ b/rayon/rayon-core/src/compile_fail/quicksort_race2.rs
@@ -0,0 +1,28 @@
+/*! ```compile_fail,E0500
+
+fn quick_sort<T:PartialOrd+Send>(v: &mut [T]) {
+    if v.len() <= 1 {
+        return;
+    }
+
+    let mid = partition(v);
+    let (lo, _hi) = v.split_at_mut(mid);
+    rayon_core::join(|| quick_sort(lo), || quick_sort(v)); //~ ERROR
+}
+
+fn partition<T:PartialOrd+Send>(v: &mut [T]) -> usize {
+    let pivot = v.len() - 1;
+    let mut i = 0;
+    for j in 0..pivot {
+        if v[j] <= v[pivot] {
+            v.swap(i, j);
+            i += 1;
+        }
+    }
+    v.swap(i, pivot);
+    i
+}
+
+fn main() { }
+
+``` */
diff --git a/rayon/rayon-core/src/compile_fail/quicksort_race3.rs b/rayon/rayon-core/src/compile_fail/quicksort_race3.rs
new file mode 100644
index 0000000..16fbf3b
--- /dev/null
+++ b/rayon/rayon-core/src/compile_fail/quicksort_race3.rs
@@ -0,0 +1,28 @@
+/*! ```compile_fail,E0524
+
+fn quick_sort<T:PartialOrd+Send>(v: &mut [T]) {
+    if v.len() <= 1 {
+        return;
+    }
+
+    let mid = partition(v);
+    let (_lo, hi) = v.split_at_mut(mid);
+    rayon_core::join(|| quick_sort(hi), || quick_sort(hi)); //~ ERROR
+}
+
+fn partition<T:PartialOrd+Send>(v: &mut [T]) -> usize {
+    let pivot = v.len() - 1;
+    let mut i = 0;
+    for j in 0..pivot {
+        if v[j] <= v[pivot] {
+            v.swap(i, j);
+            i += 1;
+        }
+    }
+    v.swap(i, pivot);
+    i
+}
+
+fn main() { }
+
+``` */
diff --git a/rayon/rayon-core/src/compile_fail/rc_return.rs b/rayon/rayon-core/src/compile_fail/rc_return.rs
new file mode 100644
index 0000000..93e3a60
--- /dev/null
+++ b/rayon/rayon-core/src/compile_fail/rc_return.rs
@@ -0,0 +1,17 @@
+/** ```compile_fail,E0277
+
+use std::rc::Rc;
+
+rayon_core::join(|| Rc::new(22), || ()); //~ ERROR
+
+``` */
+mod left {}
+
+/** ```compile_fail,E0277
+
+use std::rc::Rc;
+
+rayon_core::join(|| (), || Rc::new(23)); //~ ERROR
+
+``` */
+mod right {}
diff --git a/rayon/rayon-core/src/compile_fail/rc_upvar.rs b/rayon/rayon-core/src/compile_fail/rc_upvar.rs
new file mode 100644
index 0000000..d8aebcf
--- /dev/null
+++ b/rayon/rayon-core/src/compile_fail/rc_upvar.rs
@@ -0,0 +1,9 @@
+/*! ```compile_fail,E0277
+
+use std::rc::Rc;
+
+let r = Rc::new(22);
+rayon_core::join(|| r.clone(), || r.clone());
+//~^ ERROR
+
+``` */
diff --git a/rayon/rayon-core/src/compile_fail/scope_join_bad.rs b/rayon/rayon-core/src/compile_fail/scope_join_bad.rs
new file mode 100644
index 0000000..75e4c5c
--- /dev/null
+++ b/rayon/rayon-core/src/compile_fail/scope_join_bad.rs
@@ -0,0 +1,24 @@
+/*! ```compile_fail,E0373
+
+fn bad_scope<F>(f: F)
+    where F: FnOnce(&i32) + Send,
+{
+    rayon_core::scope(|s| {
+        let x = 22;
+        s.spawn(|_| f(&x)); //~ ERROR `x` does not live long enough
+    });
+}
+
+fn good_scope<F>(f: F)
+    where F: FnOnce(&i32) + Send,
+{
+    let x = 22;
+    rayon_core::scope(|s| {
+        s.spawn(|_| f(&x));
+    });
+}
+
+fn main() {
+}
+
+``` */
diff --git a/rayon/rayon-core/src/job.rs b/rayon/rayon-core/src/job.rs
new file mode 100644
index 0000000..5664bb3
--- /dev/null
+++ b/rayon/rayon-core/src/job.rs
@@ -0,0 +1,270 @@
+use crate::latch::Latch;
+use crate::unwind;
+use crossbeam_deque::{Injector, Steal};
+use std::any::Any;
+use std::cell::UnsafeCell;
+use std::mem;
+use std::sync::Arc;
+
/// Outcome slot for a job: starts as `None`, is filled with `Ok` holding
/// the closure's return value, or `Panic` holding the payload caught
/// while the closure was unwinding.
pub(super) enum JobResult<T> {
    None,
    Ok(T),
    Panic(Box<dyn Any + Send>),
}

/// A `Job` is used to advertise work for other threads that they may
/// want to steal. In accordance with time honored tradition, jobs are
/// arranged in a deque, so that thieves can take from the top of the
/// deque while the main worker manages the bottom of the deque. This
/// deque is managed by the `thread_pool` module.
pub(super) trait Job {
    /// Unsafe: this may be called from a different thread than the one
    /// which scheduled the job, so the implementer must ensure the
    /// appropriate traits are met, whether `Send`, `Sync`, or both.
    unsafe fn execute(this: *const ());
}
+
/// Effectively a Job trait object. Each JobRef **must** be executed
/// exactly once, or else data may leak.
///
/// Internally, we store the job's data in a `*const ()` pointer.  The
/// true type is something like `*const StackJob<...>`, but we hide
/// it. We also carry the "execute fn" from the `Job` trait.
pub(super) struct JobRef {
    pointer: *const (),
    execute_fn: unsafe fn(*const ()),
}

// SAFETY: a `JobRef` is only a type-erased pointer plus a fn pointer;
// the `Job` contract (see `Job::execute`) already requires the job to
// be safe to run from another thread, so moving/sharing the handle
// across threads is sound.
unsafe impl Send for JobRef {}
unsafe impl Sync for JobRef {}

impl JobRef {
    /// Unsafe: caller asserts that `data` will remain valid until the
    /// job is executed.
    pub(super) unsafe fn new<T>(data: *const T) -> JobRef
    where
        T: Job,
    {
        // erase types:
        JobRef {
            pointer: data as *const (),
            execute_fn: <T as Job>::execute,
        }
    }

    /// Returns an opaque handle that can be saved and compared,
    /// without making `JobRef` itself `Copy + Eq`.
    #[inline]
    pub(super) fn id(&self) -> impl Eq {
        (self.pointer, self.execute_fn)
    }

    /// Runs the job. Consumes `self`, which helps uphold the
    /// execute-exactly-once rule.
    #[inline]
    pub(super) unsafe fn execute(self) {
        (self.execute_fn)(self.pointer)
    }
}
+
/// A job that will be owned by a stack slot. This means that when it
/// executes it need not free any heap data, the cleanup occurs when
/// the stack frame is later popped.  The function parameter indicates
/// `true` if the job was stolen -- executed on a different thread.
pub(super) struct StackJob<L, F, R>
where
    L: Latch + Sync,
    F: FnOnce(bool) -> R + Send,
    R: Send,
{
    // Set by `execute` once the result has been stored.
    pub(super) latch: L,
    // The closure to run; taken out exactly once.
    func: UnsafeCell<Option<F>>,
    // Where `execute` stores the outcome for `into_result` to read.
    result: UnsafeCell<JobResult<R>>,
}

impl<L, F, R> StackJob<L, F, R>
where
    L: Latch + Sync,
    F: FnOnce(bool) -> R + Send,
    R: Send,
{
    pub(super) fn new(func: F, latch: L) -> StackJob<L, F, R> {
        StackJob {
            latch,
            func: UnsafeCell::new(Some(func)),
            result: UnsafeCell::new(JobResult::None),
        }
    }

    /// Unsafe: the returned `JobRef` borrows `self` through a raw
    /// pointer; the caller must keep this job alive until it executes.
    pub(super) unsafe fn as_job_ref(&self) -> JobRef {
        JobRef::new(self)
    }

    /// Runs the closure directly on the current thread, bypassing the
    /// latch/result machinery entirely. `stolen` is forwarded as the
    /// closure's "was I migrated?" flag.
    pub(super) unsafe fn run_inline(self, stolen: bool) -> R {
        self.func.into_inner().unwrap()(stolen)
    }

    /// Extracts the stored result. Only valid once the job has
    /// executed (i.e. the latch was set); otherwise the slot is still
    /// `JobResult::None` and this will hit `unreachable!()`.
    pub(super) unsafe fn into_result(self) -> R {
        self.result.into_inner().into_return_value()
    }
}

impl<L, F, R> Job for StackJob<L, F, R>
where
    L: Latch + Sync,
    F: FnOnce(bool) -> R + Send,
    R: Send,
{
    unsafe fn execute(this: *const ()) {
        let this = &*(this as *const Self);
        // Abort the process if anything unwinds out of this frame
        // unexpectedly; the user closure's own panics are caught by
        // `JobResult::call`, so the guard is disarmed below on the
        // normal path.
        let abort = unwind::AbortIfPanic;
        let func = (*this.func.get()).take().unwrap();
        // Store the result *before* setting the latch, so the owner
        // observes a populated result once it sees the latch set.
        (*this.result.get()) = JobResult::call(func);
        Latch::set(&this.latch);
        mem::forget(abort);
    }
}
+
/// Represents a job stored in the heap. Used to implement
/// `scope`. Unlike `StackJob`, when executed, `HeapJob` simply
/// invokes a closure, which then triggers the appropriate logic to
/// signal that the job executed.
///
/// (Probably `StackJob` should be refactored in a similar fashion.)
pub(super) struct HeapJob<BODY>
where
    BODY: FnOnce() + Send,
{
    job: BODY,
}

impl<BODY> HeapJob<BODY>
where
    BODY: FnOnce() + Send,
{
    pub(super) fn new(job: BODY) -> Box<Self> {
        Box::new(HeapJob { job })
    }

    /// Creates a `JobRef` from this job -- note that this hides all
    /// lifetimes, so it is up to you to ensure that this JobRef
    /// doesn't outlive any data that it closes over.
    pub(super) unsafe fn into_job_ref(self: Box<Self>) -> JobRef {
        JobRef::new(Box::into_raw(self))
    }

    /// Creates a static `JobRef` from this job.
    ///
    /// Safe wrapper: with `BODY: 'static` the closure cannot borrow
    /// short-lived data, so the lifetime erasure above is harmless.
    pub(super) fn into_static_job_ref(self: Box<Self>) -> JobRef
    where
        BODY: 'static,
    {
        unsafe { self.into_job_ref() }
    }
}

impl<BODY> Job for HeapJob<BODY>
where
    BODY: FnOnce() + Send,
{
    unsafe fn execute(this: *const ()) {
        // Reclaim ownership of the allocation made by `into_job_ref`;
        // the box is dropped (freeing the job) after the closure runs.
        let this = Box::from_raw(this as *mut Self);
        (this.job)();
    }
}
+
/// Represents a job stored in an `Arc` -- like `HeapJob`, but may
/// be turned into multiple `JobRef`s and called multiple times.
pub(super) struct ArcJob<BODY>
where
    BODY: Fn() + Send + Sync,
{
    job: BODY,
}

impl<BODY> ArcJob<BODY>
where
    BODY: Fn() + Send + Sync,
{
    pub(super) fn new(job: BODY) -> Arc<Self> {
        Arc::new(ArcJob { job })
    }

    /// Creates a `JobRef` from this job -- note that this hides all
    /// lifetimes, so it is up to you to ensure that this JobRef
    /// doesn't outlive any data that it closes over.
    ///
    /// Each call bumps the refcount (`Arc::clone` + `into_raw`); the
    /// matching `Arc::from_raw` in `execute` releases it, so every
    /// `JobRef` produced here must be executed exactly once.
    pub(super) unsafe fn as_job_ref(this: &Arc<Self>) -> JobRef {
        JobRef::new(Arc::into_raw(Arc::clone(this)))
    }

    /// Creates a static `JobRef` from this job.
    ///
    /// Safe wrapper: `BODY: 'static` rules out borrowed captures.
    pub(super) fn as_static_job_ref(this: &Arc<Self>) -> JobRef
    where
        BODY: 'static,
    {
        unsafe { Self::as_job_ref(this) }
    }
}

impl<BODY> Job for ArcJob<BODY>
where
    BODY: Fn() + Send + Sync,
{
    unsafe fn execute(this: *const ()) {
        // Consumes the reference count taken in `as_job_ref`.
        let this = Arc::from_raw(this as *mut Self);
        (this.job)();
    }
}
+
impl<T> JobResult<T> {
    /// Runs `func` and captures either its return value or, if it
    /// unwinds, the panic payload. The `true` argument is the
    /// "stolen" flag: this path is only reached via `Job::execute`,
    /// i.e. when the job was *not* run inline by its owner.
    fn call(func: impl FnOnce(bool) -> T) -> Self {
        match unwind::halt_unwinding(|| func(true)) {
            Ok(x) => JobResult::Ok(x),
            Err(x) => JobResult::Panic(x),
        }
    }

    /// Convert the `JobResult` for a job that has finished (and hence
    /// its JobResult is populated) into its return value.
    ///
    /// NB. This will panic if the job panicked.
    pub(super) fn into_return_value(self) -> T {
        match self {
            // `None` means the job never executed; callers must only
            // invoke this after the job's latch has been set.
            JobResult::None => unreachable!(),
            JobResult::Ok(x) => x,
            // Re-raise the captured panic on the caller's thread.
            JobResult::Panic(x) => unwind::resume_unwinding(x),
        }
    }
}
+
/// Indirect queue to provide FIFO job priority.
///
/// Real jobs go into the internal `Injector`; what gets pushed onto a
/// worker's deque is a `JobRef` pointing at this queue itself, whose
/// "execution" pops and runs the oldest queued job.
pub(super) struct JobFifo {
    inner: Injector<JobRef>,
}

impl JobFifo {
    pub(super) fn new() -> Self {
        JobFifo {
            inner: Injector::new(),
        }
    }

    /// Unsafe: the returned `JobRef` points at `self`, so the caller
    /// must keep this fifo alive until that ref is executed.
    pub(super) unsafe fn push(&self, job_ref: JobRef) -> JobRef {
        // A little indirection ensures that spawns are always prioritized in FIFO order.  The
        // jobs in a thread's deque may be popped from the back (LIFO) or stolen from the front
        // (FIFO), but either way they will end up popping from the front of this queue.
        self.inner.push(job_ref);
        JobRef::new(self)
    }
}

impl Job for JobFifo {
    unsafe fn execute(this: *const ()) {
        // We "execute" a queue by executing its first job, FIFO.
        let this = &*(this as *const Self);
        loop {
            match this.inner.steal() {
                Steal::Success(job_ref) => break job_ref.execute(),
                // Every JobRef handed out by `push` corresponds to one
                // queued job, so an empty queue here is a logic bug.
                Steal::Empty => panic!("FIFO is empty"),
                // Contention with a concurrent stealer: try again.
                Steal::Retry => {}
            }
        }
    }
}
diff --git a/rayon/rayon-core/src/join/mod.rs b/rayon/rayon-core/src/join/mod.rs
new file mode 100644
index 0000000..5ab9f6b
--- /dev/null
+++ b/rayon/rayon-core/src/join/mod.rs
@@ -0,0 +1,188 @@
+use crate::job::StackJob;
+use crate::latch::SpinLatch;
+use crate::registry::{self, WorkerThread};
+use crate::unwind;
+use std::any::Any;
+
+use crate::FnContext;
+
+#[cfg(test)]
+mod test;
+
+/// Takes two closures and *potentially* runs them in parallel. It
+/// returns a pair of the results from those closures.
+///
+/// Conceptually, calling `join()` is similar to spawning two threads,
+/// one executing each of the two closures. However, the
+/// implementation is quite different and incurs very low
+/// overhead. The underlying technique is called "work stealing": the
+/// Rayon runtime uses a fixed pool of worker threads and attempts to
+/// only execute code in parallel when there are idle CPUs to handle
+/// it.
+///
+/// When `join` is called from outside the thread pool, the calling
+/// thread will block while the closures execute in the pool.  When
+/// `join` is called within the pool, the calling thread still actively
+/// participates in the thread pool. It will begin by executing closure
+/// A (on the current thread). While it is doing that, it will advertise
+/// closure B as being available for other threads to execute. Once closure A
+/// has completed, the current thread will try to execute closure B;
+/// if however closure B has been stolen, then it will look for other work
+/// while waiting for the thief to fully execute closure B. (This is the
+/// typical work-stealing strategy).
+///
+/// # Examples
+///
+/// This example uses join to perform a quick-sort (note this is not a
+/// particularly optimized implementation: if you **actually** want to
+/// sort for real, you should prefer [the `par_sort` method] offered
+/// by Rayon).
+///
+/// [the `par_sort` method]: ../rayon/slice/trait.ParallelSliceMut.html#method.par_sort
+///
+/// ```rust
+/// # use rayon_core as rayon;
+/// let mut v = vec![5, 1, 8, 22, 0, 44];
+/// quick_sort(&mut v);
+/// assert_eq!(v, vec![0, 1, 5, 8, 22, 44]);
+///
+/// fn quick_sort<T:PartialOrd+Send>(v: &mut [T]) {
+///    if v.len() > 1 {
+///        let mid = partition(v);
+///        let (lo, hi) = v.split_at_mut(mid);
+///        rayon::join(|| quick_sort(lo),
+///                    || quick_sort(hi));
+///    }
+/// }
+///
+/// // Partition rearranges all items `<=` to the pivot
+/// // item (arbitrary selected to be the last item in the slice)
+/// // to the first half of the slice. It then returns the
+/// // "dividing point" where the pivot is placed.
+/// fn partition<T:PartialOrd+Send>(v: &mut [T]) -> usize {
+///     let pivot = v.len() - 1;
+///     let mut i = 0;
+///     for j in 0..pivot {
+///         if v[j] <= v[pivot] {
+///             v.swap(i, j);
+///             i += 1;
+///         }
+///     }
+///     v.swap(i, pivot);
+///     i
+/// }
+/// ```
+///
+/// # Warning about blocking I/O
+///
+/// The assumption is that the closures given to `join()` are
+/// CPU-bound tasks that do not perform I/O or other blocking
+/// operations. If you do perform I/O, and that I/O should block
+/// (e.g., waiting for a network request), the overall performance may
+/// be poor.  Moreover, if you cause one closure to be blocked waiting
+/// on another (for example, using a channel), that could lead to a
+/// deadlock.
+///
+/// # Panics
+///
+/// No matter what happens, both closures will always be executed.  If
+/// a single closure panics, whether it be the first or second
+/// closure, that panic will be propagated and hence `join()` will
+/// panic with the same panic value. If both closures panic, `join()`
+/// will panic with the panic value from the first closure.
+pub fn join<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB)
+where
+    A: FnOnce() -> RA + Send,
+    B: FnOnce() -> RB + Send,
+    RA: Send,
+    RB: Send,
+{
+    #[inline]
+    fn call<R>(f: impl FnOnce() -> R) -> impl FnOnce(FnContext) -> R {
+        move |_| f()
+    }
+
+    join_context(call(oper_a), call(oper_b))
+}
+
/// Identical to `join`, except that the closures have a parameter
/// that provides context for the way the closure has been called,
/// especially indicating whether they're executing on a different
/// thread than where `join_context` was called.  This will occur if
/// the second job is stolen by a different thread, or if
/// `join_context` was called from outside the thread pool to begin
/// with.
pub fn join_context<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB)
where
    A: FnOnce(FnContext) -> RA + Send,
    B: FnOnce(FnContext) -> RB + Send,
    RA: Send,
    RB: Send,
{
    // Task A always runs on the current thread, so its "migrated"
    // flag is simply whether the whole call was injected from outside
    // the pool.
    #[inline]
    fn call_a<R>(f: impl FnOnce(FnContext) -> R, injected: bool) -> impl FnOnce() -> R {
        move || f(FnContext::new(injected))
    }

    // Task B learns at execution time whether it was stolen
    // (`migrated == true`) or run inline/locally.
    #[inline]
    fn call_b<R>(f: impl FnOnce(FnContext) -> R) -> impl FnOnce(bool) -> R {
        move |migrated| f(FnContext::new(migrated))
    }

    registry::in_worker(|worker_thread, injected| unsafe {
        // Create virtual wrapper for task b; this all has to be
        // done here so that the stack frame can keep it all live
        // long enough.
        let job_b = StackJob::new(call_b(oper_b), SpinLatch::new(worker_thread));
        let job_b_ref = job_b.as_job_ref();
        let job_b_id = job_b_ref.id();
        worker_thread.push(job_b_ref);

        // Execute task a; hopefully b gets stolen in the meantime.
        // Panics are caught here because we must not unwind past this
        // frame while job B may still borrow from it; see
        // `join_recover_from_panic`.
        let status_a = unwind::halt_unwinding(call_a(oper_a, injected));
        let result_a = match status_a {
            Ok(v) => v,
            Err(err) => join_recover_from_panic(worker_thread, &job_b.latch, err),
        };

        // Now that task A has finished, try to pop job B from the
        // local stack.  It may already have been popped by job A; it
        // may also have been stolen. There may also be some tasks
        // pushed on top of it in the stack, and we will have to pop
        // those off to get to it.
        while !job_b.latch.probe() {
            if let Some(job) = worker_thread.take_local_job() {
                if job_b_id == job.id() {
                    // Found it! Let's run it.
                    //
                    // Note that this could panic, but it's ok if we unwind here.
                    let result_b = job_b.run_inline(injected);
                    return (result_a, result_b);
                } else {
                    worker_thread.execute(job);
                }
            } else {
                // Local deque is empty. Time to steal from other
                // threads.
                worker_thread.wait_until(&job_b.latch);
                debug_assert!(job_b.latch.probe());
                break;
            }
        }

        // Job B was executed elsewhere (stolen); its latch is set, so
        // its result slot is populated and safe to consume.
        (result_a, job_b.into_result())
    })
}
+
/// If job A panics, we still cannot return until we are sure that job
/// B is complete. This is because it may contain references into the
/// enclosing stack frame(s).
#[cold] // cold path
unsafe fn join_recover_from_panic(
    worker_thread: &WorkerThread,
    job_b_latch: &SpinLatch<'_>,
    err: Box<dyn Any + Send>,
) -> ! {
    // Keep working/stealing until job B's latch is set, then resume
    // unwinding with job A's panic payload.
    worker_thread.wait_until(job_b_latch);
    unwind::resume_unwinding(err)
}
diff --git a/rayon/rayon-core/src/join/test.rs b/rayon/rayon-core/src/join/test.rs
new file mode 100644
index 0000000..b303dbc
--- /dev/null
+++ b/rayon/rayon-core/src/join/test.rs
@@ -0,0 +1,151 @@
+//! Tests for the join code.
+
+use crate::join::*;
+use crate::unwind;
+use crate::ThreadPoolBuilder;
+use rand::distributions::Standard;
+use rand::{Rng, SeedableRng};
+use rand_xorshift::XorShiftRng;
+
/// Parallel quick-sort used as the workload for the join tests.
fn quick_sort<T: PartialOrd + Send>(v: &mut [T]) {
    // A slice of 0 or 1 elements is already sorted.
    if v.len() <= 1 {
        return;
    }

    let mid = partition(v);
    let (lo, hi) = v.split_at_mut(mid);
    // Recurse on the two disjoint halves, potentially in parallel.
    join(|| quick_sort(lo), || quick_sort(hi));
}
+
/// Lomuto partition step: uses the last element as the pivot, moves
/// every element `<=` pivot into the leading region of the slice, then
/// places the pivot just after that region and returns its index.
fn partition<T: PartialOrd + Send>(v: &mut [T]) -> usize {
    let pivot_idx = v.len() - 1;
    // `store` is where the next element <= pivot will be placed.
    let mut store = 0;
    for scan in 0..pivot_idx {
        if v[scan] <= v[pivot_idx] {
            v.swap(store, scan);
            store += 1;
        }
    }
    // Move the pivot into its final sorted position.
    v.swap(store, pivot_idx);
    store
}
+
/// Builds an `XorShiftRng` from a fixed seed (bytes 0, 1, 2, ...) so
/// the random test data is deterministic and failures reproduce.
fn seeded_rng() -> XorShiftRng {
    let mut seed = <XorShiftRng as SeedableRng>::Seed::default();
    (0..).zip(seed.as_mut()).for_each(|(i, x)| *x = i);
    XorShiftRng::from_seed(seed)
}
+
#[test]
fn sort() {
    // Parallel quick-sort on the implicit global pool must agree with
    // the std sequential sort.
    let rng = seeded_rng();
    let mut data: Vec<u32> = rng.sample_iter(&Standard).take(6 * 1024).collect();
    let mut sorted_data = data.clone();
    sorted_data.sort();
    quick_sort(&mut data);
    assert_eq!(data, sorted_data);
}

#[test]
#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn sort_in_pool() {
    // Same check, but run inside an explicitly-built pool via
    // `ThreadPool::install`.
    let rng = seeded_rng();
    let mut data: Vec<u32> = rng.sample_iter(&Standard).take(12 * 1024).collect();

    let pool = ThreadPoolBuilder::new().build().unwrap();
    let mut sorted_data = data.clone();
    sorted_data.sort();
    pool.install(|| quick_sort(&mut data));
    assert_eq!(data, sorted_data);
}
+
// A panic in either closure must propagate out of `join`; when both
// panic, the payload from the *first* closure wins (per `join`'s
// documented contract).

#[test]
#[should_panic(expected = "Hello, world!")]
fn panic_propagate_a() {
    join(|| panic!("Hello, world!"), || ());
}

#[test]
#[should_panic(expected = "Hello, world!")]
fn panic_propagate_b() {
    join(|| (), || panic!("Hello, world!"));
}

#[test]
#[should_panic(expected = "Hello, world!")]
fn panic_propagate_both() {
    join(|| panic!("Hello, world!"), || panic!("Goodbye, world!"));
}

#[test]
#[cfg_attr(not(panic = "unwind"), ignore)]
fn panic_b_still_executes() {
    // Even when closure A panics, closure B must still run to
    // completion before the panic is propagated.
    let mut x = false;
    match unwind::halt_unwinding(|| join(|| panic!("Hello, world!"), || x = true)) {
        Ok(_) => panic!("failed to propagate panic from closure A,"),
        Err(_) => assert!(x, "closure b failed to execute"),
    }
}
+
// The `FnContext::migrated` flag reported to each closure depends on
// where `join_context` is called from and on pool size; the three
// tests below pin each case.

#[test]
#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn join_context_both() {
    // If we're not in a pool, both should be marked stolen as they're injected.
    let (a_migrated, b_migrated) = join_context(|a| a.migrated(), |b| b.migrated());
    assert!(a_migrated);
    assert!(b_migrated);
}

#[test]
#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn join_context_neither() {
    // If we're already in a 1-thread pool, neither job should be stolen.
    let pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap();
    let (a_migrated, b_migrated) =
        pool.install(|| join_context(|a| a.migrated(), |b| b.migrated()));
    assert!(!a_migrated);
    assert!(!b_migrated);
}

#[test]
#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn join_context_second() {
    use std::sync::Barrier;

    // If we're already in a 2-thread pool, the second job should be stolen.
    // The barrier forces the closures onto different threads: neither
    // can finish until both have started.
    let barrier = Barrier::new(2);
    let pool = ThreadPoolBuilder::new().num_threads(2).build().unwrap();
    let (a_migrated, b_migrated) = pool.install(|| {
        join_context(
            |a| {
                barrier.wait();
                a.migrated()
            },
            |b| {
                barrier.wait();
                b.migrated()
            },
        )
    });
    assert!(!a_migrated);
    assert!(b_migrated);
}
+
#[test]
#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn join_counter_overflow() {
    // Regression test: hammer `join` enough times to exercise the
    // sleep-counter wrap-around on 32-bit targets.
    const MAX: u32 = 500_000;

    let mut i = 0;
    let mut j = 0;
    let pool = ThreadPoolBuilder::new().num_threads(2).build().unwrap();

    // Hammer on join a bunch of times -- used to hit overflow debug-assertions
    // in JEC on 32-bit targets: https://github.com/rayon-rs/rayon/issues/797
    for _ in 0..MAX {
        pool.join(|| i += 1, || j += 1);
    }

    assert_eq!(i, MAX);
    assert_eq!(j, MAX);
}
diff --git a/rayon/rayon-core/src/latch.rs b/rayon/rayon-core/src/latch.rs
new file mode 100644
index 0000000..de43272
--- /dev/null
+++ b/rayon/rayon-core/src/latch.rs
@@ -0,0 +1,414 @@
+use std::marker::PhantomData;
+use std::ops::Deref;
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::sync::{Arc, Condvar, Mutex};
+use std::usize;
+
+use crate::registry::{Registry, WorkerThread};
+
/// We define various kinds of latches, which are all a primitive signaling
/// mechanism. A latch starts as false. Eventually someone calls `set()` and
/// it becomes true. You can test if it has been set by calling `probe()`.
///
/// Some kinds of latches, but not all, support a `wait()` operation
/// that will wait until the latch is set, blocking efficiently. That
/// is not part of the trait since it is not possible to do with all
/// latches.
///
/// The intention is that `set()` is called once, but `probe()` may be
/// called any number of times. Once `probe()` returns true, the memory
/// effects that occurred before `set()` become visible.
///
/// It'd probably be better to refactor the API into two paired types,
/// but that's a bit of work, and this is not a public API.
///
/// ## Memory ordering
///
/// Latches need to guarantee two things:
///
/// - Once `probe()` returns true, all memory effects from the `set()`
///   are visible (in other words, the set should synchronize-with
///   the probe).
/// - Once `set()` occurs, the next `probe()` *will* observe it.  This
///   typically requires a seq-cst ordering. See [the "tickle-then-get-sleepy" scenario in the sleep
///   README](/src/sleep/README.md#tickle-then-get-sleepy) for details.
pub(super) trait Latch {
    /// Set the latch, signalling others.
    ///
    /// # WARNING
    ///
    /// Setting a latch triggers other threads to wake up and (in some
    /// cases) complete. This may, in turn, cause memory to be
    /// deallocated and so forth. One must be very careful about this,
    /// and it's typically better to read all the fields you will need
    /// to access *before* a latch is set!
    ///
    /// This function operates on `*const Self` instead of `&self` to allow it
    /// to become dangling during this call. The caller must ensure that the
    /// pointer is valid upon entry, and not invalidated during the call by any
    /// actions other than `set` itself.
    unsafe fn set(this: *const Self);
}

/// Implemented by latches that expose their underlying `CoreLatch`,
/// so the sleep machinery can interact with them uniformly.
pub(super) trait AsCoreLatch {
    fn as_core_latch(&self) -> &CoreLatch;
}

// States of a `CoreLatch`. Transitions: UNSET -> SLEEPY -> SLEEPING
// (driven by the owning thread), any state -> SET (driven by `set`).

/// Latch is not set, owning thread is awake
const UNSET: usize = 0;

/// Latch is not set, owning thread is going to sleep on this latch
/// (but has not yet fallen asleep).
const SLEEPY: usize = 1;

/// Latch is not set, owning thread is asleep on this latch and
/// must be awoken.
const SLEEPING: usize = 2;

/// Latch is set.
const SET: usize = 3;
+
/// The core state machine shared by latches that participate in the
/// sleep protocol. Besides the set/unset flag it tracks whether the
/// owning thread is preparing to sleep (`SLEEPY`) or actually asleep
/// (`SLEEPING`), so that `set()` can tell whether a wakeup
/// notification is required.
#[derive(Debug)]
pub(super) struct CoreLatch {
    state: AtomicUsize,
}

impl CoreLatch {
    #[inline]
    fn new() -> Self {
        Self {
            // Starts as `UNSET` (0): not set, owner awake.
            state: AtomicUsize::new(0),
        }
    }

    /// Returns the address of this core latch as an integer. Used
    /// for logging.
    #[inline]
    pub(super) fn addr(&self) -> usize {
        self as *const CoreLatch as usize
    }

    /// Invoked by owning thread as it prepares to sleep. Returns true
    /// if the owning thread may proceed to fall asleep, false if the
    /// latch was set in the meantime.
    #[inline]
    pub(super) fn get_sleepy(&self) -> bool {
        self.state
            .compare_exchange(UNSET, SLEEPY, Ordering::SeqCst, Ordering::Relaxed)
            .is_ok()
    }

    /// Invoked by owning thread as it falls asleep. Returns
    /// true if the owning thread should block, or false if the latch
    /// was set in the meantime.
    #[inline]
    pub(super) fn fall_asleep(&self) -> bool {
        self.state
            .compare_exchange(SLEEPY, SLEEPING, Ordering::SeqCst, Ordering::Relaxed)
            .is_ok()
    }

    /// Invoked by the owning thread when it wakes up for a reason
    /// other than the latch being set: rolls `SLEEPING` back to
    /// `UNSET` so a later `set()` does not observe a stale
    /// `SLEEPING` state. If the latch was set in the meantime, the
    /// state is left as `SET`.
    #[inline]
    pub(super) fn wake_up(&self) {
        if !self.probe() {
            let _ =
                self.state
                    .compare_exchange(SLEEPING, UNSET, Ordering::SeqCst, Ordering::Relaxed);
        }
    }

    /// Set the latch. If this returns true, the owning thread was sleeping
    /// and must be awoken.
    ///
    /// This is private because, typically, setting a latch involves
    /// doing some wakeups; those are encapsulated in the surrounding
    /// latch code.
    #[inline]
    unsafe fn set(this: *const Self) -> bool {
        let old_state = (*this).state.swap(SET, Ordering::AcqRel);
        old_state == SLEEPING
    }

    /// Test if this latch has been set.
    #[inline]
    pub(super) fn probe(&self) -> bool {
        self.state.load(Ordering::Acquire) == SET
    }
}
+
/// Spin latches are the simplest, most efficient kind, but they do
/// not support a `wait()` operation. They just have a boolean flag
/// that becomes true when `set()` is called.
pub(super) struct SpinLatch<'r> {
    core_latch: CoreLatch,
    // Registry of the worker thread that owns (blocks on) this latch.
    registry: &'r Arc<Registry>,
    // Index of the owning worker, used to wake it when set.
    target_worker_index: usize,
    // True if `set()` may be called from a thread outside `registry`
    // (see `SpinLatch::cross`).
    cross: bool,
}

impl<'r> SpinLatch<'r> {
    /// Creates a new spin latch that is owned by `thread`. This means
    /// that `thread` is the only thread that should be blocking on
    /// this latch -- it also means that when the latch is set, we
    /// will wake `thread` if it is sleeping.
    #[inline]
    pub(super) fn new(thread: &'r WorkerThread) -> SpinLatch<'r> {
        SpinLatch {
            core_latch: CoreLatch::new(),
            registry: thread.registry(),
            target_worker_index: thread.index(),
            cross: false,
        }
    }

    /// Creates a new spin latch for cross-threadpool blocking.  Notably, we
    /// need to make sure the registry is kept alive after setting, so we can
    /// safely call the notification.
    #[inline]
    pub(super) fn cross(thread: &'r WorkerThread) -> SpinLatch<'r> {
        SpinLatch {
            cross: true,
            ..SpinLatch::new(thread)
        }
    }

    /// Test if this latch has been set.
    #[inline]
    pub(super) fn probe(&self) -> bool {
        self.core_latch.probe()
    }
}

impl<'r> AsCoreLatch for SpinLatch<'r> {
    #[inline]
    fn as_core_latch(&self) -> &CoreLatch {
        &self.core_latch
    }
}

impl<'r> Latch for SpinLatch<'r> {
    #[inline]
    unsafe fn set(this: *const Self) {
        let cross_registry;

        let registry: &Registry = if (*this).cross {
            // Ensure the registry stays alive while we notify it.
            // Otherwise, it would be possible that we set the spin
            // latch and the other thread sees it and exits, causing
            // the registry to be deallocated, all before we get a
            // chance to invoke `registry.notify_worker_latch_is_set`.
            cross_registry = Arc::clone((*this).registry);
            &cross_registry
        } else {
            // If this is not a "cross-registry" spin-latch, then the
            // thread which is performing `set` is itself ensuring
            // that the registry stays alive. However, that doesn't
            // include this *particular* `Arc` handle if the waiting
            // thread then exits, so we must completely dereference it.
            (*this).registry
        };
        let target_worker_index = (*this).target_worker_index;

        // NOTE: Once we `set`, the target may proceed and invalidate `this`!
        if CoreLatch::set(&(*this).core_latch) {
            // Subtle: at this point, we can no longer read from
            // `self`, because the thread owning this spin latch may
            // have awoken and deallocated the latch. Therefore, we
            // only use fields whose values we already read.
            registry.notify_worker_latch_is_set(target_worker_index);
        }
    }
}
+
/// A Latch starts as false and eventually becomes true. You can block
/// until it becomes true.
///
/// Implemented with a mutex-guarded flag and a condvar, so `wait()`
/// sleeps in the OS rather than spinning.
#[derive(Debug)]
pub(super) struct LockLatch {
    m: Mutex<bool>,
    v: Condvar,
}

impl LockLatch {
    #[inline]
    pub(super) fn new() -> LockLatch {
        LockLatch {
            m: Mutex::new(false),
            v: Condvar::new(),
        }
    }

    /// Block until latch is set, then resets this lock latch so it can be reused again.
    pub(super) fn wait_and_reset(&self) {
        let mut guard = self.m.lock().unwrap();
        // Loop guards against spurious condvar wakeups.
        while !*guard {
            guard = self.v.wait(guard).unwrap();
        }
        *guard = false;
    }

    /// Block until latch is set.
    pub(super) fn wait(&self) {
        let mut guard = self.m.lock().unwrap();
        // Loop guards against spurious condvar wakeups.
        while !*guard {
            guard = self.v.wait(guard).unwrap();
        }
    }
}

impl Latch for LockLatch {
    #[inline]
    unsafe fn set(this: *const Self) {
        // Set the flag under the lock, then wake every waiter.
        let mut guard = (*this).m.lock().unwrap();
        *guard = true;
        (*this).v.notify_all();
    }
}
+
/// Counting latches are used to implement scopes. They track a
/// counter. Unlike other latches, calling `set()` does not
/// necessarily make the latch be considered `set()`; instead, it just
/// decrements the counter. The latch is only "set" (in the sense that
/// `probe()` returns true) once the counter reaches zero.
///
/// Note: like a `SpinLatch`, count latches are always associated with
/// some registry that is probing them, which must be tickled when
/// they are set. *Unlike* a `SpinLatch`, they don't themselves hold a
/// reference to that registry. This is because in some cases the
/// registry owns the count-latch, and that would create a cycle. So a
/// `CountLatch` must be given a reference to its owning registry when
/// it is set. For this reason, it does not implement the `Latch`
/// trait (but it doesn't have to, as it is not used in those generic
/// contexts).
#[derive(Debug)]
pub(super) struct CountLatch {
    core_latch: CoreLatch,
    counter: AtomicUsize,
}

impl CountLatch {
    /// Creates a count latch with an initial count of 1.
    #[inline]
    pub(super) fn new() -> CountLatch {
        Self::with_count(1)
    }

    #[inline]
    pub(super) fn with_count(n: usize) -> CountLatch {
        CountLatch {
            core_latch: CoreLatch::new(),
            counter: AtomicUsize::new(n),
        }
    }

    /// Bumps the counter. Must not be called once the latch has
    /// already become set (counter reached zero).
    #[inline]
    pub(super) fn increment(&self) {
        debug_assert!(!self.core_latch.probe());
        self.counter.fetch_add(1, Ordering::Relaxed);
    }

    /// Decrements the latch counter by one. If this is the final
    /// count, then the latch is **set**, and calls to `probe()` will
    /// return true. Returns whether the latch was set.
    #[inline]
    pub(super) unsafe fn set(this: *const Self) -> bool {
        if (*this).counter.fetch_sub(1, Ordering::SeqCst) == 1 {
            CoreLatch::set(&(*this).core_latch);
            true
        } else {
            false
        }
    }

    /// Decrements the latch counter by one and possibly set it.  If
    /// the latch is set, then the specific worker thread is tickled,
    /// which should be the one that owns this latch.
    #[inline]
    pub(super) unsafe fn set_and_tickle_one(
        this: *const Self,
        registry: &Registry,
        target_worker_index: usize,
    ) {
        if Self::set(this) {
            registry.notify_worker_latch_is_set(target_worker_index);
        }
    }
}

impl AsCoreLatch for CountLatch {
    #[inline]
    fn as_core_latch(&self) -> &CoreLatch {
        &self.core_latch
    }
}
+
/// Counting variant of `LockLatch`: each `set()` decrements a counter
/// and only sets (and signals) the inner `LockLatch` when the counter
/// reaches zero; `wait()` blocks until that happens.
#[derive(Debug)]
pub(super) struct CountLockLatch {
    lock_latch: LockLatch,
    counter: AtomicUsize,
}

impl CountLockLatch {
    #[inline]
    pub(super) fn with_count(n: usize) -> CountLockLatch {
        CountLockLatch {
            lock_latch: LockLatch::new(),
            counter: AtomicUsize::new(n),
        }
    }

    /// Bumps the counter. Must only be called while the counter is
    /// still nonzero (i.e. before the latch has been fully set).
    #[inline]
    pub(super) fn increment(&self) {
        let old_counter = self.counter.fetch_add(1, Ordering::Relaxed);
        debug_assert!(old_counter != 0);
    }

    /// Block until the counter has been decremented to zero.
    pub(super) fn wait(&self) {
        self.lock_latch.wait();
    }
}

impl Latch for CountLockLatch {
    #[inline]
    unsafe fn set(this: *const Self) {
        // Only the final decrement releases the waiters.
        if (*this).counter.fetch_sub(1, Ordering::SeqCst) == 1 {
            LockLatch::set(&(*this).lock_latch);
        }
    }
}
+
/// `&L` without any implication of `dereferenceable` for `Latch::set`
///
/// Stores the reference as a raw pointer so that `Latch::set` can be
/// forwarded without asserting the borrow is still dereferenceable at
/// that point (see `Latch::set`'s pointer-based contract).
pub(super) struct LatchRef<'a, L> {
    inner: *const L,
    // Ties the raw pointer back to the lifetime of the borrow.
    marker: PhantomData<&'a L>,
}

impl<L> LatchRef<'_, L> {
    pub(super) fn new(inner: &L) -> LatchRef<'_, L> {
        LatchRef {
            inner,
            marker: PhantomData,
        }
    }
}

// SAFETY: `LatchRef` only grants shared access to the underlying `L`,
// so it is safe to share across threads whenever `L: Sync`.
unsafe impl<L: Sync> Sync for LatchRef<'_, L> {}

impl<L> Deref for LatchRef<'_, L> {
    type Target = L;

    fn deref(&self) -> &L {
        // SAFETY: if we have &self, the inner latch is still alive
        unsafe { &*self.inner }
    }
}

impl<L: Latch> Latch for LatchRef<'_, L> {
    #[inline]
    unsafe fn set(this: *const Self) {
        // Forward to the referenced latch's `set`.
        L::set((*this).inner);
    }
}
diff --git a/rayon/rayon-core/src/lib.rs b/rayon/rayon-core/src/lib.rs
new file mode 100644
index 0000000..c9694ee
--- /dev/null
+++ b/rayon/rayon-core/src/lib.rs
@@ -0,0 +1,841 @@
+//! Rayon-core houses the core stable APIs of Rayon.
+//!
+//! These APIs have been mirrored in the Rayon crate and it is recommended to use these from there.
+//!
+//! [`join`] is used to take two closures and potentially run them in parallel.
+//!   - It will run in parallel if task B gets stolen before task A can finish.
+//!   - It will run sequentially if task A finishes before task B is stolen and can continue on task B.
+//!
+//! [`scope`] creates a scope in which you can run any number of parallel tasks.
+//! These tasks can spawn nested tasks and scopes, but given the nature of work stealing, the order of execution can not be guaranteed.
+//! The scope will exist until all tasks spawned within the scope have been completed.
+//!
+//! [`spawn`] adds a task into the 'static' or 'global' scope, or a local scope created by the [`scope()`] function.
+//!
+//! [`ThreadPool`] can be used to create your own thread pools (using [`ThreadPoolBuilder`]) or to customize the global one.
+//! Tasks spawned within the pool (using [`install()`], [`join()`], etc.) will be added to a deque,
+//! where it becomes available for work stealing from other threads in the local threadpool.
+//!
+//! [`join`]: fn.join.html
+//! [`scope`]: fn.scope.html
+//! [`scope()`]: fn.scope.html
+//! [`spawn`]: fn.spawn.html
+//! [`ThreadPool`]: struct.ThreadPool.html
+//! [`install()`]: struct.ThreadPool.html#method.install
+//! [`spawn()`]: struct.ThreadPool.html#method.spawn
+//! [`join()`]: struct.ThreadPool.html#method.join
+//! [`ThreadPoolBuilder`]: struct.ThreadPoolBuilder.html
+//!
+//! # Global fallback when threading is unsupported
+//!
+//! Rayon uses `std` APIs for threading, but some targets have incomplete implementations that
+//! always return `Unsupported` errors. The WebAssembly `wasm32-unknown-unknown` and `wasm32-wasi`
+//! targets are notable examples of this. Rather than panicking on the unsupported error when
+//! creating the implicit global threadpool, Rayon configures a fallback mode instead.
+//!
+//! This fallback mode mostly functions as if it were using a single-threaded "pool", like setting
+//! `RAYON_NUM_THREADS=1`. For example, `join` will execute its two closures sequentially, since
+//! there is no other thread to share the work. However, since the pool is not running independent
+//! of the main thread, non-blocking calls like `spawn` may not execute at all, unless a lower-
+//! priority call like `broadcast` gives them an opening. The fallback mode does not try to emulate
+//! anything like thread preemption or `async` task switching, but `yield_now` or `yield_local`
+//! can also volunteer execution time.
+//!
+//! Explicit `ThreadPoolBuilder` methods always report their error without any fallback.
+//!
+//! # Restricting multiple versions
+//!
+//! In order to ensure proper coordination between threadpools, and especially
+//! to make sure there's only one global threadpool, `rayon-core` is actively
+//! restricted from building multiple versions of itself into a single target.
+//! You may see a build error like this in violation:
+//!
+//! ```text
+//! error: native library `rayon-core` is being linked to by more
+//! than one package, and can only be linked to by one package
+//! ```
+//!
+//! While we strive to keep `rayon-core` semver-compatible, it's still
+//! possible to arrive at this situation if different crates have overly
+//! restrictive tilde or inequality requirements for `rayon-core`.  The
+//! conflicting requirements will need to be resolved before the build will
+//! succeed.
+
+#![deny(missing_debug_implementations)]
+#![deny(missing_docs)]
+#![deny(unreachable_pub)]
+#![warn(rust_2018_idioms)]
+
+use std::any::Any;
+use std::env;
+use std::error::Error;
+use std::fmt;
+use std::io;
+use std::marker::PhantomData;
+use std::str::FromStr;
+
+#[macro_use]
+mod log;
+#[macro_use]
+mod private;
+
+mod broadcast;
+mod job;
+mod join;
+mod latch;
+mod registry;
+mod scope;
+mod sleep;
+mod spawn;
+mod thread_pool;
+mod unwind;
+
+mod compile_fail;
+mod test;
+
+pub use self::broadcast::{broadcast, spawn_broadcast, BroadcastContext};
+pub use self::join::{join, join_context};
+pub use self::registry::ThreadBuilder;
+pub use self::scope::{in_place_scope, scope, Scope};
+pub use self::scope::{in_place_scope_fifo, scope_fifo, ScopeFifo};
+pub use self::spawn::{spawn, spawn_fifo};
+pub use self::thread_pool::current_thread_has_pending_tasks;
+pub use self::thread_pool::current_thread_index;
+pub use self::thread_pool::ThreadPool;
+pub use self::thread_pool::{yield_local, yield_now, Yield};
+
+use self::registry::{CustomSpawn, DefaultSpawn, ThreadSpawn};
+
+/// Returns the maximum number of threads that Rayon supports in a single thread-pool.
+///
+/// If a higher thread count is requested by calling `ThreadPoolBuilder::num_threads` or by setting
+/// the `RAYON_NUM_THREADS` environment variable, then it will be reduced to this maximum.
+///
+/// The value may vary between different targets, and is subject to change in new Rayon versions.
+pub fn max_num_threads() -> usize {
+    // We are limited by the bits available in the sleep counter's `AtomicUsize`.
+    // `sleep::THREADS_MAX` is derived from that representation limit.
+    crate::sleep::THREADS_MAX
+}
+
+/// Returns the number of threads in the current registry. If this
+/// code is executing within a Rayon thread-pool, then this will be
+/// the number of threads for the thread-pool of the current
+/// thread. Otherwise, it will be the number of threads for the global
+/// thread-pool.
+///
+/// This can be useful when trying to judge how many times to split
+/// parallel work (the parallel iterator traits use this value
+/// internally for this purpose).
+///
+/// # Future compatibility note
+///
+/// Note that unless this thread-pool was created with a
+/// builder that specifies the number of threads, then this
+/// number may vary over time in future versions (see [the
+/// `num_threads()` method for details][snt]).
+///
+/// [snt]: struct.ThreadPoolBuilder.html#method.num_threads
+pub fn current_num_threads() -> usize {
+    // Delegates to the registry, which resolves "current vs. global" itself.
+    crate::registry::Registry::current_num_threads()
+}
+
+/// Error when initializing a thread pool.
+#[derive(Debug)]
+pub struct ThreadPoolBuildError {
+    // The concrete cause; kept private so variants can change freely.
+    kind: ErrorKind,
+}
+
+#[derive(Debug)]
+enum ErrorKind {
+    // `build_global` was called after the global pool was already built.
+    GlobalPoolAlreadyInitialized,
+    // Spawning a worker thread failed with an underlying I/O error.
+    IOError(io::Error),
+}
+
+/// Used to create a new [`ThreadPool`] or to configure the global rayon thread pool.
+/// ## Creating a ThreadPool
+/// The following creates a thread pool with 22 threads.
+///
+/// ```rust
+/// # use rayon_core as rayon;
+/// let pool = rayon::ThreadPoolBuilder::new().num_threads(22).build().unwrap();
+/// ```
+///
+/// To instead configure the global thread pool, use [`build_global()`]:
+///
+/// ```rust
+/// # use rayon_core as rayon;
+/// rayon::ThreadPoolBuilder::new().num_threads(22).build_global().unwrap();
+/// ```
+///
+/// [`ThreadPool`]: struct.ThreadPool.html
+/// [`build_global()`]: struct.ThreadPoolBuilder.html#method.build_global
+pub struct ThreadPoolBuilder<S = DefaultSpawn> {
+    /// The number of threads in the rayon thread pool.
+    /// If zero will use the RAYON_NUM_THREADS environment variable.
+    /// If RAYON_NUM_THREADS is invalid or zero will use the default.
+    num_threads: usize,
+
+    /// Custom closure, if any, to handle a panic that we cannot propagate
+    /// anywhere else.
+    panic_handler: Option<Box<PanicHandler>>,
+
+    /// Closure to compute the name of a thread.
+    get_thread_name: Option<Box<dyn FnMut(usize) -> String>>,
+
+    /// The stack size for the created worker threads
+    stack_size: Option<usize>,
+
+    /// Closure invoked on worker thread start.
+    start_handler: Option<Box<StartHandler>>,
+
+    /// Closure invoked on worker thread exit.
+    exit_handler: Option<Box<ExitHandler>>,
+
+    /// Closure invoked to spawn threads. The type parameter `S` defaults to
+    /// `DefaultSpawn` (plain `std::thread` spawning); see `spawn_handler`.
+    spawn_handler: S,
+
+    /// If false, worker threads will execute spawned jobs in a
+    /// "depth-first" fashion. If true, they will do a "breadth-first"
+    /// fashion. Depth-first is the default.
+    breadth_first: bool,
+}
+
+/// Contains the rayon thread pool configuration. Use [`ThreadPoolBuilder`] instead.
+///
+/// [`ThreadPoolBuilder`]: struct.ThreadPoolBuilder.html
+#[deprecated(note = "Use `ThreadPoolBuilder`")]
+#[derive(Default)]
+pub struct Configuration {
+    // Deprecated shim: every method simply forwards to this builder.
+    builder: ThreadPoolBuilder,
+}
+
+/// The type for a panic handling closure. Note that this same closure
+/// may be invoked multiple times in parallel.
+type PanicHandler = dyn Fn(Box<dyn Any + Send>) + Send + Sync;
+
+/// The type for a closure that gets invoked when a thread starts. The
+/// closure is passed the index of the thread on which it is invoked.
+/// Note that this same closure may be invoked multiple times in parallel.
+type StartHandler = dyn Fn(usize) + Send + Sync;
+
+/// The type for a closure that gets invoked when a thread exits. The
+/// closure is passed the index of the thread on which it is invoked.
+/// Note that this same closure may be invoked multiple times in parallel.
+type ExitHandler = dyn Fn(usize) + Send + Sync;
+
+// NB: We can't `#[derive(Default)]` because `S` is left ambiguous.
+// This manual impl pins `S = DefaultSpawn` (the type parameter's default).
+impl Default for ThreadPoolBuilder {
+    fn default() -> Self {
+        ThreadPoolBuilder {
+            num_threads: 0,
+            panic_handler: None,
+            get_thread_name: None,
+            stack_size: None,
+            start_handler: None,
+            exit_handler: None,
+            spawn_handler: DefaultSpawn,
+            breadth_first: false,
+        }
+    }
+}
+
+impl ThreadPoolBuilder {
+    /// Creates and returns a valid rayon thread pool builder, but does not initialize it.
+    /// Equivalent to `ThreadPoolBuilder::default()`.
+    pub fn new() -> Self {
+        Self::default()
+    }
+}
+
+/// Note: the `S: ThreadSpawn` constraint is an internal implementation detail for the
+/// default spawn and those set by [`spawn_handler`](#method.spawn_handler).
+impl<S> ThreadPoolBuilder<S>
+where
+    S: ThreadSpawn,
+{
+    /// Creates a new `ThreadPool` initialized using this configuration.
+    pub fn build(self) -> Result<ThreadPool, ThreadPoolBuildError> {
+        ThreadPool::build(self)
+    }
+
+    /// Initializes the global thread pool. This initialization is
+    /// **optional**.  If you do not call this function, the thread pool
+    /// will be automatically initialized with the default
+    /// configuration. Calling `build_global` is not recommended, except
+    /// in two scenarios:
+    ///
+    /// - You wish to change the default configuration.
+    /// - You are running a benchmark, in which case initializing may
+    ///   yield slightly more consistent results, since the worker threads
+    ///   will already be ready to go even in the first iteration.  But
+    ///   this cost is minimal.
+    ///
+    /// Initialization of the global thread pool happens exactly
+    /// once. Once started, the configuration cannot be
+    /// changed. Therefore, if you call `build_global` a second time, it
+    /// will return an error. An `Ok` result indicates that this
+    /// is the first initialization of the thread pool.
+    pub fn build_global(self) -> Result<(), ThreadPoolBuildError> {
+        let registry = registry::init_global_registry(self)?;
+        // Block until the worker threads have started up, so the pool is
+        // fully ready when this returns (see benchmark note above).
+        registry.wait_until_primed();
+        Ok(())
+    }
+}
+
+impl ThreadPoolBuilder {
+    /// Creates a scoped `ThreadPool` initialized using this configuration.
+    ///
+    /// This is a convenience function for building a pool using [`crossbeam::scope`]
+    /// to spawn threads in a [`spawn_handler`](#method.spawn_handler).
+    /// The threads in this pool will start by calling `wrapper`, which should
+    /// do initialization and continue by calling `ThreadBuilder::run()`.
+    ///
+    /// [`crossbeam::scope`]: https://docs.rs/crossbeam/0.8/crossbeam/fn.scope.html
+    ///
+    /// # Examples
+    ///
+    /// A scoped pool may be useful in combination with scoped thread-local variables.
+    ///
+    /// ```
+    /// # use rayon_core as rayon;
+    ///
+    /// scoped_tls::scoped_thread_local!(static POOL_DATA: Vec<i32>);
+    ///
+    /// fn main() -> Result<(), rayon::ThreadPoolBuildError> {
+    ///     let pool_data = vec![1, 2, 3];
+    ///
+    ///     // We haven't assigned any TLS data yet.
+    ///     assert!(!POOL_DATA.is_set());
+    ///
+    ///     rayon::ThreadPoolBuilder::new()
+    ///         .build_scoped(
+    ///             // Borrow `pool_data` in TLS for each thread.
+    ///             |thread| POOL_DATA.set(&pool_data, || thread.run()),
+    ///             // Do some work that needs the TLS data.
+    ///             |pool| pool.install(|| assert!(POOL_DATA.is_set())),
+    ///         )?;
+    ///
+    ///     // Once we've returned, `pool_data` is no longer borrowed.
+    ///     drop(pool_data);
+    ///     Ok(())
+    /// }
+    /// ```
+    pub fn build_scoped<W, F, R>(self, wrapper: W, with_pool: F) -> Result<R, ThreadPoolBuildError>
+    where
+        W: Fn(ThreadBuilder) + Sync, // expected to call `run()`
+        F: FnOnce(&ThreadPool) -> R,
+    {
+        // All worker threads are spawned inside this crossbeam scope, so they
+        // are joined before `scope` returns (and before `self`/`wrapper` drop).
+        let result = crossbeam_utils::thread::scope(|scope| {
+            let wrapper = &wrapper;
+            let pool = self
+                .spawn_handler(|thread| {
+                    // Mirror the name/stack-size settings onto the scoped builder.
+                    let mut builder = scope.builder();
+                    if let Some(name) = thread.name() {
+                        builder = builder.name(name.to_string());
+                    }
+                    if let Some(size) = thread.stack_size() {
+                        builder = builder.stack_size(size);
+                    }
+                    builder.spawn(move |_| wrapper(thread))?;
+                    Ok(())
+                })
+                .build()?;
+            Ok(with_pool(&pool))
+        });
+
+        match result {
+            Ok(result) => result,
+            // A panic escaped the crossbeam scope: re-raise it on this thread.
+            Err(err) => unwind::resume_unwinding(err),
+        }
+    }
+}
+
+impl<S> ThreadPoolBuilder<S> {
+    /// Sets a custom function for spawning threads.
+    ///
+    /// Note that the threads will not exit until after the pool is dropped. It
+    /// is up to the caller to wait for thread termination if that is important
+    /// for any invariants. For instance, threads created in [`crossbeam::scope`]
+    /// will be joined before that scope returns, and this will block indefinitely
+    /// if the pool is leaked. Furthermore, the global thread pool doesn't terminate
+    /// until the entire process exits!
+    ///
+    /// [`crossbeam::scope`]: https://docs.rs/crossbeam/0.8/crossbeam/fn.scope.html
+    ///
+    /// # Examples
+    ///
+    /// A minimal spawn handler just needs to call `run()` from an independent thread.
+    ///
+    /// ```
+    /// # use rayon_core as rayon;
+    /// fn main() -> Result<(), rayon::ThreadPoolBuildError> {
+    ///     let pool = rayon::ThreadPoolBuilder::new()
+    ///         .spawn_handler(|thread| {
+    ///             std::thread::spawn(|| thread.run());
+    ///             Ok(())
+    ///         })
+    ///         .build()?;
+    ///
+    ///     pool.install(|| println!("Hello from my custom thread!"));
+    ///     Ok(())
+    /// }
+    /// ```
+    ///
+    /// The default spawn handler sets the name and stack size if given, and propagates
+    /// any errors from the thread builder.
+    ///
+    /// ```
+    /// # use rayon_core as rayon;
+    /// fn main() -> Result<(), rayon::ThreadPoolBuildError> {
+    ///     let pool = rayon::ThreadPoolBuilder::new()
+    ///         .spawn_handler(|thread| {
+    ///             let mut b = std::thread::Builder::new();
+    ///             if let Some(name) = thread.name() {
+    ///                 b = b.name(name.to_owned());
+    ///             }
+    ///             if let Some(stack_size) = thread.stack_size() {
+    ///                 b = b.stack_size(stack_size);
+    ///             }
+    ///             b.spawn(|| thread.run())?;
+    ///             Ok(())
+    ///         })
+    ///         .build()?;
+    ///
+    ///     pool.install(|| println!("Hello from my fully custom thread!"));
+    ///     Ok(())
+    /// }
+    /// ```
+    ///
+    /// This can also be used for a pool of scoped threads like [`crossbeam::scope`],
+    /// or [`std::thread::scope`] introduced in Rust 1.63, which is encapsulated in
+    /// [`build_scoped`](#method.build_scoped).
+    ///
+    /// [`std::thread::scope`]: https://doc.rust-lang.org/std/thread/fn.scope.html
+    ///
+    /// ```
+    /// # use rayon_core as rayon;
+    /// fn main() -> Result<(), rayon::ThreadPoolBuildError> {
+    ///     std::thread::scope(|scope| {
+    ///         let pool = rayon::ThreadPoolBuilder::new()
+    ///             .spawn_handler(|thread| {
+    ///                 let mut builder = std::thread::Builder::new();
+    ///                 if let Some(name) = thread.name() {
+    ///                     builder = builder.name(name.to_string());
+    ///                 }
+    ///                 if let Some(size) = thread.stack_size() {
+    ///                     builder = builder.stack_size(size);
+    ///                 }
+    ///                 builder.spawn_scoped(scope, || {
+    ///                     // Add any scoped initialization here, then run!
+    ///                     thread.run()
+    ///                 })?;
+    ///                 Ok(())
+    ///             })
+    ///             .build()?;
+    ///
+    ///         pool.install(|| println!("Hello from my custom scoped thread!"));
+    ///         Ok(())
+    ///     })
+    /// }
+    /// ```
+    pub fn spawn_handler<F>(self, spawn: F) -> ThreadPoolBuilder<CustomSpawn<F>>
+    where
+        F: FnMut(ThreadBuilder) -> io::Result<()>,
+    {
+        ThreadPoolBuilder {
+            spawn_handler: CustomSpawn::new(spawn),
+            // ..self  (can't use struct-update syntax: the type parameter changes)
+            num_threads: self.num_threads,
+            panic_handler: self.panic_handler,
+            get_thread_name: self.get_thread_name,
+            stack_size: self.stack_size,
+            start_handler: self.start_handler,
+            exit_handler: self.exit_handler,
+            breadth_first: self.breadth_first,
+        }
+    }
+
+    /// Returns a reference to the current spawn handler.
+    fn get_spawn_handler(&mut self) -> &mut S {
+        &mut self.spawn_handler
+    }
+
+    /// Get the number of threads that will be used for the thread
+    /// pool. See `num_threads()` for more information.
+    fn get_num_threads(&self) -> usize {
+        if self.num_threads > 0 {
+            self.num_threads
+        } else {
+            match env::var("RAYON_NUM_THREADS")
+                .ok()
+                .and_then(|s| usize::from_str(&s).ok())
+            {
+                Some(x) if x > 0 => return x,
+                // An explicit `0` in the env var means "use all logical CPUs".
+                Some(x) if x == 0 => return num_cpus::get(),
+                _ => {}
+            }
+
+            // Support for deprecated `RAYON_RS_NUM_CPUS`.
+            match env::var("RAYON_RS_NUM_CPUS")
+                .ok()
+                .and_then(|s| usize::from_str(&s).ok())
+            {
+                Some(x) if x > 0 => x,
+                _ => num_cpus::get(),
+            }
+        }
+    }
+
+    /// Get the thread name for the thread with the given index.
+    /// Returns `None` when no naming closure was configured.
+    fn get_thread_name(&mut self, index: usize) -> Option<String> {
+        let f = self.get_thread_name.as_mut()?;
+        Some(f(index))
+    }
+
+    /// Sets a closure which takes a thread index and returns
+    /// the thread's name.
+    pub fn thread_name<F>(mut self, closure: F) -> Self
+    where
+        F: FnMut(usize) -> String + 'static,
+    {
+        self.get_thread_name = Some(Box::new(closure));
+        self
+    }
+
+    /// Sets the number of threads to be used in the rayon threadpool.
+    ///
+    /// If you specify a non-zero number of threads using this
+    /// function, then the resulting thread-pools are guaranteed to
+    /// start at most this number of threads.
+    ///
+    /// If `num_threads` is 0, or you do not call this function, then
+    /// the Rayon runtime will select the number of threads
+    /// automatically. At present, this is based on the
+    /// `RAYON_NUM_THREADS` environment variable (if set),
+    /// or the number of logical CPUs (otherwise).
+    /// In the future, however, the default behavior may
+    /// change to dynamically add or remove threads as needed.
+    ///
+    /// **Future compatibility warning:** Given the default behavior
+    /// may change in the future, if you wish to rely on a fixed
+    /// number of threads, you should use this function to specify
+    /// that number. To reproduce the current default behavior, you
+    /// may wish to use the [`num_cpus`
+    /// crate](https://crates.io/crates/num_cpus) to query the number
+    /// of CPUs dynamically.
+    ///
+    /// **Old environment variable:** `RAYON_NUM_THREADS` is a one-to-one
+    /// replacement of the now deprecated `RAYON_RS_NUM_CPUS` environment
+    /// variable. If both variables are specified, `RAYON_NUM_THREADS` will
+    /// be preferred.
+    pub fn num_threads(mut self, num_threads: usize) -> Self {
+        self.num_threads = num_threads;
+        self
+    }
+
+    /// Takes the current panic handler, leaving `None`.
+    fn take_panic_handler(&mut self) -> Option<Box<PanicHandler>> {
+        self.panic_handler.take()
+    }
+
+    /// Normally, whenever Rayon catches a panic, it tries to
+    /// propagate it to someplace sensible, to try and reflect the
+    /// semantics of sequential execution. But in some cases,
+    /// particularly with the `spawn()` APIs, there is no
+    /// obvious place where we should propagate the panic to.
+    /// In that case, this panic handler is invoked.
+    ///
+    /// If no panic handler is set, the default is to abort the
+    /// process, under the principle that panics should not go
+    /// unobserved.
+    ///
+    /// If the panic handler itself panics, this will abort the
+    /// process. To prevent this, wrap the body of your panic handler
+    /// in a call to `std::panic::catch_unwind()`.
+    pub fn panic_handler<H>(mut self, panic_handler: H) -> Self
+    where
+        H: Fn(Box<dyn Any + Send>) + Send + Sync + 'static,
+    {
+        self.panic_handler = Some(Box::new(panic_handler));
+        self
+    }
+
+    /// Get the stack size of the worker threads
+    fn get_stack_size(&self) -> Option<usize> {
+        self.stack_size
+    }
+
+    /// Sets the stack size of the worker threads
+    pub fn stack_size(mut self, stack_size: usize) -> Self {
+        self.stack_size = Some(stack_size);
+        self
+    }
+
+    /// **(DEPRECATED)** Suggest to worker threads that they execute
+    /// spawned jobs in a "breadth-first" fashion.
+    ///
+    /// Typically, when a worker thread is idle or blocked, it will
+    /// attempt to execute the job from the *top* of its local deque of
+    /// work (i.e., the job most recently spawned). If this flag is set
+    /// to true, however, workers will prefer to execute in a
+    /// *breadth-first* fashion -- that is, they will search for jobs at
+    /// the *bottom* of their local deque. (At present, workers *always*
+    /// steal from the bottom of other workers' deques, regardless of
+    /// the setting of this flag.)
+    ///
+    /// If you think of the tasks as a tree, where a parent task
+    /// spawns its children in the tree, then this flag loosely
+    /// corresponds to doing a breadth-first traversal of the tree,
+    /// whereas the default would be to do a depth-first traversal.
+    ///
+    /// **Note that this is an "execution hint".** Rayon's task
+    /// execution is highly dynamic and the precise order in which
+    /// independent tasks are executed is not intended to be
+    /// guaranteed.
+    ///
+    /// This `breadth_first()` method is now deprecated per [RFC #1],
+    /// and in the future its effect may be removed. Consider using
+    /// [`scope_fifo()`] for a similar effect.
+    ///
+    /// [RFC #1]: https://github.com/rayon-rs/rfcs/blob/master/accepted/rfc0001-scope-scheduling.md
+    /// [`scope_fifo()`]: fn.scope_fifo.html
+    #[deprecated(note = "use `scope_fifo` and `spawn_fifo` for similar effect")]
+    pub fn breadth_first(mut self) -> Self {
+        self.breadth_first = true;
+        self
+    }
+
+    /// Returns whether the (deprecated) breadth-first hint is set.
+    fn get_breadth_first(&self) -> bool {
+        self.breadth_first
+    }
+
+    /// Takes the current thread start callback, leaving `None`.
+    fn take_start_handler(&mut self) -> Option<Box<StartHandler>> {
+        self.start_handler.take()
+    }
+
+    /// Sets a callback to be invoked on thread start.
+    ///
+    /// The closure is passed the index of the thread on which it is invoked.
+    /// Note that this same closure may be invoked multiple times in parallel.
+    /// If this closure panics, the panic will be passed to the panic handler.
+    /// If that handler returns, then startup will continue normally.
+    pub fn start_handler<H>(mut self, start_handler: H) -> Self
+    where
+        H: Fn(usize) + Send + Sync + 'static,
+    {
+        self.start_handler = Some(Box::new(start_handler));
+        self
+    }
+
+    /// Takes the current thread exit callback, leaving `None`.
+    fn take_exit_handler(&mut self) -> Option<Box<ExitHandler>> {
+        self.exit_handler.take()
+    }
+
+    /// Sets a callback to be invoked on thread exit.
+    ///
+    /// The closure is passed the index of the thread on which it is invoked.
+    /// Note that this same closure may be invoked multiple times in parallel.
+    /// If this closure panics, the panic will be passed to the panic handler.
+    /// If that handler returns, then the thread will exit normally.
+    pub fn exit_handler<H>(mut self, exit_handler: H) -> Self
+    where
+        H: Fn(usize) + Send + Sync + 'static,
+    {
+        self.exit_handler = Some(Box::new(exit_handler));
+        self
+    }
+}
+
+// Deprecated forwarding layer: every method delegates to `ThreadPoolBuilder`.
+#[allow(deprecated)]
+impl Configuration {
+    /// Creates and return a valid rayon thread pool configuration, but does not initialize it.
+    pub fn new() -> Configuration {
+        Configuration {
+            builder: ThreadPoolBuilder::new(),
+        }
+    }
+
+    /// Deprecated in favor of `ThreadPoolBuilder::build`.
+    pub fn build(self) -> Result<ThreadPool, Box<dyn Error + 'static>> {
+        // Note the boxed error: the old API predates `ThreadPoolBuildError`.
+        self.builder.build().map_err(Box::from)
+    }
+
+    /// Deprecated in favor of `ThreadPoolBuilder::thread_name`.
+    pub fn thread_name<F>(mut self, closure: F) -> Self
+    where
+        F: FnMut(usize) -> String + 'static,
+    {
+        self.builder = self.builder.thread_name(closure);
+        self
+    }
+
+    /// Deprecated in favor of `ThreadPoolBuilder::num_threads`.
+    pub fn num_threads(mut self, num_threads: usize) -> Configuration {
+        self.builder = self.builder.num_threads(num_threads);
+        self
+    }
+
+    /// Deprecated in favor of `ThreadPoolBuilder::panic_handler`.
+    pub fn panic_handler<H>(mut self, panic_handler: H) -> Configuration
+    where
+        H: Fn(Box<dyn Any + Send>) + Send + Sync + 'static,
+    {
+        self.builder = self.builder.panic_handler(panic_handler);
+        self
+    }
+
+    /// Deprecated in favor of `ThreadPoolBuilder::stack_size`.
+    pub fn stack_size(mut self, stack_size: usize) -> Self {
+        self.builder = self.builder.stack_size(stack_size);
+        self
+    }
+
+    /// Deprecated in favor of `ThreadPoolBuilder::breadth_first`.
+    pub fn breadth_first(mut self) -> Self {
+        self.builder = self.builder.breadth_first();
+        self
+    }
+
+    /// Deprecated in favor of `ThreadPoolBuilder::start_handler`.
+    pub fn start_handler<H>(mut self, start_handler: H) -> Configuration
+    where
+        H: Fn(usize) + Send + Sync + 'static,
+    {
+        self.builder = self.builder.start_handler(start_handler);
+        self
+    }
+
+    /// Deprecated in favor of `ThreadPoolBuilder::exit_handler`.
+    pub fn exit_handler<H>(mut self, exit_handler: H) -> Configuration
+    where
+        H: Fn(usize) + Send + Sync + 'static,
+    {
+        self.builder = self.builder.exit_handler(exit_handler);
+        self
+    }
+
+    /// Returns a ThreadPoolBuilder with identical parameters.
+    fn into_builder(self) -> ThreadPoolBuilder {
+        self.builder
+    }
+}
+
+impl ThreadPoolBuildError {
+    /// Wraps an `ErrorKind` in the public error type.
+    fn new(kind: ErrorKind) -> ThreadPoolBuildError {
+        ThreadPoolBuildError { kind }
+    }
+
+    /// True if the underlying cause is an `io::ErrorKind::Unsupported` error
+    /// (e.g. targets without thread support; see the module docs on fallback).
+    fn is_unsupported(&self) -> bool {
+        matches!(&self.kind, ErrorKind::IOError(e) if e.kind() == io::ErrorKind::Unsupported)
+    }
+}
+
+// Shared message for both `Error::description` and the `Display` impl below.
+const GLOBAL_POOL_ALREADY_INITIALIZED: &str =
+    "The global thread pool has already been initialized.";
+
+impl Error for ThreadPoolBuildError {
+    // `description` is deprecated but still implemented for older callers.
+    #[allow(deprecated)]
+    fn description(&self) -> &str {
+        match self.kind {
+            ErrorKind::GlobalPoolAlreadyInitialized => GLOBAL_POOL_ALREADY_INITIALIZED,
+            ErrorKind::IOError(ref e) => e.description(),
+        }
+    }
+
+    fn source(&self) -> Option<&(dyn Error + 'static)> {
+        match &self.kind {
+            ErrorKind::GlobalPoolAlreadyInitialized => None,
+            ErrorKind::IOError(e) => Some(e),
+        }
+    }
+}
+
+impl fmt::Display for ThreadPoolBuildError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match &self.kind {
+            ErrorKind::GlobalPoolAlreadyInitialized => GLOBAL_POOL_ALREADY_INITIALIZED.fmt(f),
+            // Defer to the underlying I/O error's own message.
+            ErrorKind::IOError(e) => e.fmt(f),
+        }
+    }
+}
+
+/// Deprecated in favor of `ThreadPoolBuilder::build_global`.
+#[deprecated(note = "use `ThreadPoolBuilder::build_global`")]
+#[allow(deprecated)]
+pub fn initialize(config: Configuration) -> Result<(), Box<dyn Error>> {
+    // Old API boxed its errors; preserve that by converting here.
+    config.into_builder().build_global().map_err(Box::from)
+}
+
+impl<S> fmt::Debug for ThreadPoolBuilder<S> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        // Destructure exhaustively so that adding a new field forces this
+        // impl to be revisited.
+        let ThreadPoolBuilder {
+            ref num_threads,
+            ref get_thread_name,
+            ref panic_handler,
+            ref stack_size,
+            ref start_handler,
+            ref exit_handler,
+            spawn_handler: _,
+            ref breadth_first,
+        } = *self;
+
+        // Just print `Some(<closure>)` or `None` to the debug
+        // output.
+        struct ClosurePlaceholder;
+        impl fmt::Debug for ClosurePlaceholder {
+            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+                f.write_str("<closure>")
+            }
+        }
+        let get_thread_name = get_thread_name.as_ref().map(|_| ClosurePlaceholder);
+        let panic_handler = panic_handler.as_ref().map(|_| ClosurePlaceholder);
+        let start_handler = start_handler.as_ref().map(|_| ClosurePlaceholder);
+        let exit_handler = exit_handler.as_ref().map(|_| ClosurePlaceholder);
+
+        f.debug_struct("ThreadPoolBuilder")
+            .field("num_threads", num_threads)
+            .field("get_thread_name", &get_thread_name)
+            .field("panic_handler", &panic_handler)
+            .field("stack_size", &stack_size)
+            .field("start_handler", &start_handler)
+            .field("exit_handler", &exit_handler)
+            .field("breadth_first", &breadth_first)
+            .finish()
+    }
+}
+
+// Debug output is identical to the wrapped builder's.
+#[allow(deprecated)]
+impl fmt::Debug for Configuration {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        self.builder.fmt(f)
+    }
+}
+
+/// Provides the calling context to a closure called by `join_context`.
+#[derive(Debug)]
+pub struct FnContext {
+    // Whether the closure runs on a different thread than the one that
+    // provided it; exposed via `migrated()`.
+    migrated: bool,
+
+    /// disable `Send` and `Sync`, just for a little future-proofing.
+    _marker: PhantomData<*mut ()>,
+}
+
+impl FnContext {
+    /// Crate-internal constructor; `migrated` records whether the closure
+    /// was stolen onto another thread.
+    #[inline]
+    fn new(migrated: bool) -> Self {
+        FnContext {
+            migrated,
+            _marker: PhantomData,
+        }
+    }
+}
+
+impl FnContext {
+    /// Returns `true` if the closure was called from a different thread
+    /// than it was provided from.
+    #[inline]
+    pub fn migrated(&self) -> bool {
+        self.migrated
+    }
+}
diff --git a/rayon/rayon-core/src/log.rs b/rayon/rayon-core/src/log.rs
new file mode 100644
index 0000000..5f54fcd
--- /dev/null
+++ b/rayon/rayon-core/src/log.rs
@@ -0,0 +1,421 @@
+//! Debug Logging
+//!
+//! To use in a debug build, set the env var `RAYON_LOG` as
+//! described below.  In a release build, logs are compiled out by
+//! default unless Rayon is built with `--cfg rayon_rs_log` (try
+//! `RUSTFLAGS="--cfg rayon_rs_log"`).
+//!
+//! Note that logs are an internally debugging tool and their format
+//! is considered unstable, as are the details of how to enable them.
+//!
+//! # Valid values for RAYON_LOG
+//!
+//! The `RAYON_LOG` variable can take on the following values:
+//!
+//! * `tail:<file>` -- dumps the last 10,000 events into the given file;
+//!   useful for tracking down deadlocks
+//! * `profile:<file>` -- dumps only those events needed to reconstruct how
+//!   many workers are active at a given time
//! * `all` -- dumps every event to stderr; useful for debugging
+
+use crossbeam_channel::{self, Receiver, Sender};
+use std::collections::VecDeque;
+use std::env;
+#[cfg(not(target_vendor = "teaclave"))]
+use std::fs::File;
// Logger is disabled in Teaclave; use the untrusted fs for compilation
+use std::io::{self, BufWriter, Write};
+#[cfg(target_vendor = "teaclave")]
+use std::untrusted::fs::File;
+
/// True if logs are compiled in.
///
/// Logging is compiled in for debug builds, when the `rayon_rs_log` cfg
/// is set explicitly, or for the Teaclave target (where this module must
/// still compile, although the surrounding comments note the logger is
/// not actually used there).
pub(super) const LOG_ENABLED: bool = cfg!(any(
    rayon_rs_log,
    debug_assertions,
    target_vendor = "teaclave"
));
+
// Events consumed by the logger threads and replayed by `SimulatorState`.
#[derive(Copy, Clone, PartialOrd, Ord, PartialEq, Eq, Debug)]
pub(super) enum Event {
    /// Flushes events to disk, used to terminate benchmarking.
    Flush,

    /// Indicates that a worker thread started execution.
    ThreadStart {
        worker: usize,
        terminate_addr: usize,
    },

    /// Indicates that a worker thread terminated execution.
    ThreadTerminate { worker: usize },

    /// Indicates that a worker thread became idle, blocked on `latch_addr`.
    ThreadIdle { worker: usize, latch_addr: usize },

    /// Indicates that an idle worker thread found work to do, after
    /// yield rounds. It should no longer be considered idle.
    ThreadFoundWork { worker: usize, yields: u32 },

    /// Indicates that a worker blocked on a latch observed that it was set.
    ///
    /// Internal debugging event that does not affect the state
    /// machine.
    ThreadSawLatchSet { worker: usize, latch_addr: usize },

    /// Indicates that an idle worker is getting sleepy. `jobs_counter` is the internal
    /// sleep state that we saw at the time.
    ThreadSleepy { worker: usize, jobs_counter: usize },

    /// Indicates that the thread's attempt to fall asleep was
    /// interrupted because the latch was set. (This is not, in and of
    /// itself, a change to the thread state.)
    ThreadSleepInterruptedByLatch { worker: usize, latch_addr: usize },

    /// Indicates that the thread's attempt to fall asleep was
    /// interrupted because a job was posted. (This is not, in and of
    /// itself, a change to the thread state.)
    ThreadSleepInterruptedByJob { worker: usize },

    /// Indicates that an idle worker has gone to sleep.
    ThreadSleeping { worker: usize, latch_addr: usize },

    /// Indicates that a sleeping worker has awoken.
    ThreadAwoken { worker: usize, latch_addr: usize },

    /// Indicates that the given worker thread was notified it should
    /// awaken.
    ThreadNotify { worker: usize },

    /// The given worker has pushed a job to its local deque.
    JobPushed { worker: usize },

    /// The given worker has popped a job from its local deque.
    JobPopped { worker: usize },

    /// The given worker has stolen a job from the deque of another.
    JobStolen { worker: usize, victim: usize },

    /// N jobs were injected into the global queue.
    JobsInjected { count: usize },

    /// A job was removed from the global queue.
    JobUninjected { worker: usize },

    /// A job was broadcasted to N threads.
    JobBroadcast { count: usize },

    /// When announcing a job, this was the value of the counters we observed.
    ///
    /// No effect on thread state, just a debugging event.
    JobThreadCounts {
        worker: usize,
        num_idle: u16,
        num_sleepers: u16,
    },
}
+
/// Handle to the logging thread, if any. You can use this to deliver
/// logs. You can also clone it freely.
#[derive(Clone)]
pub(super) struct Logger {
    // `None` when logging is disabled; otherwise the channel feeding the
    // background logger thread spawned by `Logger::new`.
    sender: Option<Sender<Event>>,
}
+
+impl Logger {
+    pub(super) fn new(num_workers: usize) -> Logger {
+        if !LOG_ENABLED {
+            return Self::disabled();
+        }
+
+        // see the doc comment for the format
+        let env_log = match env::var("RAYON_LOG") {
+            Ok(s) => s,
+            Err(_) => return Self::disabled(),
+        };
+
+        let (sender, receiver) = crossbeam_channel::unbounded();
+
+        if let Some(filename) = env_log.strip_prefix("tail:") {
+            let filename = filename.to_string();
+            ::std::thread::spawn(move || {
+                Self::tail_logger_thread(num_workers, filename, 10_000, receiver)
+            });
+        } else if env_log == "all" {
+            ::std::thread::spawn(move || Self::all_logger_thread(num_workers, receiver));
+        } else if let Some(filename) = env_log.strip_prefix("profile:") {
+            let filename = filename.to_string();
+            ::std::thread::spawn(move || {
+                Self::profile_logger_thread(num_workers, filename, 10_000, receiver)
+            });
+        } else {
+            panic!("RAYON_LOG should be 'tail:<file>' or 'profile:<file>'");
+        }
+
+        Logger {
+            sender: Some(sender),
+        }
+    }
+
+    fn disabled() -> Logger {
+        Logger { sender: None }
+    }
+
+    #[inline]
+    pub(super) fn log(&self, event: impl FnOnce() -> Event) {
+        if !LOG_ENABLED {
+            return;
+        }
+
+        if let Some(sender) = &self.sender {
+            sender.send(event()).unwrap();
+        }
+    }
+
+    fn profile_logger_thread(
+        num_workers: usize,
+        log_filename: String,
+        capacity: usize,
+        receiver: Receiver<Event>,
+    ) {
+        let file = File::create(&log_filename)
+            .unwrap_or_else(|err| panic!("failed to open `{}`: {}", log_filename, err));
+
+        let mut writer = BufWriter::new(file);
+        let mut events = Vec::with_capacity(capacity);
+        let mut state = SimulatorState::new(num_workers);
+        let timeout = std::time::Duration::from_secs(30);
+
+        loop {
+            while let Ok(event) = receiver.recv_timeout(timeout) {
+                if let Event::Flush = event {
+                    break;
+                }
+
+                events.push(event);
+                if events.len() == capacity {
+                    break;
+                }
+            }
+
+            for event in events.drain(..) {
+                if state.simulate(&event) {
+                    state.dump(&mut writer, &event).unwrap();
+                }
+            }
+
+            writer.flush().unwrap();
+        }
+    }
+
+    fn tail_logger_thread(
+        num_workers: usize,
+        log_filename: String,
+        capacity: usize,
+        receiver: Receiver<Event>,
+    ) {
+        let file = File::create(&log_filename)
+            .unwrap_or_else(|err| panic!("failed to open `{}`: {}", log_filename, err));
+
+        let mut writer = BufWriter::new(file);
+        let mut events: VecDeque<Event> = VecDeque::with_capacity(capacity);
+        let mut state = SimulatorState::new(num_workers);
+        let timeout = std::time::Duration::from_secs(30);
+        let mut skipped = false;
+
+        loop {
+            while let Ok(event) = receiver.recv_timeout(timeout) {
+                if let Event::Flush = event {
+                    // We ignore Flush events in tail mode --
+                    // we're really just looking for
+                    // deadlocks.
+                    continue;
+                } else {
+                    if events.len() == capacity {
+                        let event = events.pop_front().unwrap();
+                        state.simulate(&event);
+                        skipped = true;
+                    }
+
+                    events.push_back(event);
+                }
+            }
+
+            if skipped {
+                writeln!(writer, "...").unwrap();
+                skipped = false;
+            }
+
+            for event in events.drain(..) {
+                // In tail mode, we dump *all* events out, whether or
+                // not they were 'interesting' to the state machine.
+                state.simulate(&event);
+                state.dump(&mut writer, &event).unwrap();
+            }
+
+            writer.flush().unwrap();
+        }
+    }
+
+    fn all_logger_thread(num_workers: usize, receiver: Receiver<Event>) {
+        let stderr = std::io::stderr();
+        let mut state = SimulatorState::new(num_workers);
+
+        for event in receiver {
+            let mut writer = BufWriter::new(stderr.lock());
+            state.simulate(&event);
+            state.dump(&mut writer, &event).unwrap();
+            writer.flush().unwrap();
+        }
+    }
+}
+
/// Simulated state of a single worker thread, as tracked by
/// `SimulatorState` while replaying the event stream.
#[derive(Copy, Clone, PartialOrd, Ord, PartialEq, Eq, Debug)]
enum State {
    Working,
    Idle,
    Notified,
    Sleeping,
    Terminated,
}

impl State {
    /// One-character code shown in the per-thread dump columns.
    fn letter(&self) -> char {
        match *self {
            Self::Working => 'W',
            Self::Idle => 'I',
            Self::Notified => 'N',
            Self::Sleeping => 'S',
            Self::Terminated => 'T',
        }
    }
}
+
/// Replays logged events to reconstruct queue sizes and per-thread
/// states, so each dumped line can show a snapshot of the whole pool.
struct SimulatorState {
    // Number of jobs currently in each worker's local deque.
    local_queue_size: Vec<usize>,
    // Last known state of each worker thread.
    thread_states: Vec<State>,
    // Number of jobs currently sitting in the global injector queue.
    injector_size: usize,
}
+
impl SimulatorState {
    /// Starts with every worker assumed `Working` and all queues empty.
    fn new(num_workers: usize) -> Self {
        Self {
            local_queue_size: (0..num_workers).map(|_| 0).collect(),
            thread_states: (0..num_workers).map(|_| State::Working).collect(),
            injector_size: 0,
        }
    }

    /// Applies `event` to the simulated pool state.
    ///
    /// Returns `true` if the event changed the state (these are the
    /// events worth printing in profile mode); debug-only events fall
    /// through to the catch-all and return `false`.
    ///
    /// The `assert_eq!` calls encode the expected state transitions; a
    /// failing assertion means the event stream itself is inconsistent.
    fn simulate(&mut self, event: &Event) -> bool {
        match *event {
            Event::ThreadIdle { worker, .. } => {
                assert_eq!(self.thread_states[worker], State::Working);
                self.thread_states[worker] = State::Idle;
                true
            }

            Event::ThreadStart { worker, .. } | Event::ThreadFoundWork { worker, .. } => {
                self.thread_states[worker] = State::Working;
                true
            }

            Event::ThreadTerminate { worker, .. } => {
                self.thread_states[worker] = State::Terminated;
                true
            }

            Event::ThreadSleeping { worker, .. } => {
                assert_eq!(self.thread_states[worker], State::Idle);
                self.thread_states[worker] = State::Sleeping;
                true
            }

            Event::ThreadAwoken { worker, .. } => {
                assert_eq!(self.thread_states[worker], State::Notified);
                self.thread_states[worker] = State::Idle;
                true
            }

            Event::JobPushed { worker } => {
                self.local_queue_size[worker] += 1;
                true
            }

            Event::JobPopped { worker } => {
                self.local_queue_size[worker] -= 1;
                true
            }

            Event::JobStolen { victim, .. } => {
                self.local_queue_size[victim] -= 1;
                true
            }

            Event::JobsInjected { count } => {
                self.injector_size += count;
                true
            }

            Event::JobUninjected { .. } => {
                self.injector_size -= 1;
                true
            }

            Event::ThreadNotify { worker } => {
                // Currently, this log event occurs while holding the
                // thread lock, so we should *always* see it before
                // the worker awakens.
                assert_eq!(self.thread_states[worker], State::Sleeping);
                self.thread_states[worker] = State::Notified;
                true
            }

            // remaining events are no-ops from pov of simulating the
            // thread state
            _ => false,
        }
    }

    /// Writes one line for `event`: idle/sleeping/notified thread counts,
    /// pending-job totals, the quoted event, then a `Tnn,<state>[,<len>]`
    /// column per worker thread.
    fn dump(&mut self, w: &mut impl Write, event: &Event) -> io::Result<()> {
        let num_idle_threads = self
            .thread_states
            .iter()
            .filter(|s| **s == State::Idle)
            .count();

        let num_sleeping_threads = self
            .thread_states
            .iter()
            .filter(|s| **s == State::Sleeping)
            .count();

        let num_notified_threads = self
            .thread_states
            .iter()
            .filter(|s| **s == State::Notified)
            .count();

        let num_pending_jobs: usize = self.local_queue_size.iter().sum();

        write!(w, "{:2},", num_idle_threads)?;
        write!(w, "{:2},", num_sleeping_threads)?;
        write!(w, "{:2},", num_notified_threads)?;
        write!(w, "{:4},", num_pending_jobs)?;
        write!(w, "{:4},", self.injector_size)?;

        let event_str = format!("{:?}", event);
        write!(w, r#""{:60}","#, event_str)?;

        for ((i, state), queue_size) in (0..).zip(&self.thread_states).zip(&self.local_queue_size) {
            write!(w, " T{:02},{}", i, state.letter(),)?;

            // Blank out zero-length queues so non-empty ones stand out.
            if *queue_size > 0 {
                write!(w, ",{:03},", queue_size)?;
            } else {
                write!(w, ",   ,")?;
            }
        }

        writeln!(w)?;
        Ok(())
    }
}
diff --git a/rayon/rayon-core/src/private.rs b/rayon/rayon-core/src/private.rs
new file mode 100644
index 0000000..c85e77b
--- /dev/null
+++ b/rayon/rayon-core/src/private.rs
@@ -0,0 +1,26 @@
+//! The public parts of this private module are used to create traits
+//! that cannot be implemented outside of our own crate.  This way we
+//! can feel free to extend those traits without worrying about it
+//! being a breaking change for other implementations.
+
/// If this type is pub but not publicly reachable, third parties
/// can't name it and can't implement traits using it.
#[allow(missing_debug_implementations)]
pub struct PrivateMarker;

/// Declares the "sealing" method: a trait containing `private_decl!{}`
/// can only be implemented inside this crate, because any implementor
/// must be able to produce a `PrivateMarker`.
macro_rules! private_decl {
    () => {
        /// This trait is private; this method exists to make it
        /// impossible to implement outside the crate.
        #[doc(hidden)]
        fn __rayon_private__(&self) -> crate::private::PrivateMarker;
    };
}

/// Supplies the implementation of the method declared by
/// `private_decl!`; used in this crate's own trait impls.
macro_rules! private_impl {
    () => {
        fn __rayon_private__(&self) -> crate::private::PrivateMarker {
            crate::private::PrivateMarker
        }
    };
}
diff --git a/rayon/rayon-core/src/registry.rs b/rayon/rayon-core/src/registry.rs
new file mode 100644
index 0000000..5d56ac9
--- /dev/null
+++ b/rayon/rayon-core/src/registry.rs
@@ -0,0 +1,1029 @@
+use crate::job::{JobFifo, JobRef, StackJob};
+use crate::latch::{AsCoreLatch, CoreLatch, CountLatch, Latch, LatchRef, LockLatch, SpinLatch};
+use crate::log::Event::*;
+use crate::log::Logger;
+use crate::sleep::Sleep;
+use crate::unwind;
+use crate::{
+    ErrorKind, ExitHandler, PanicHandler, StartHandler, ThreadPoolBuildError, ThreadPoolBuilder,
+    Yield,
+};
+use crossbeam_deque::{Injector, Steal, Stealer, Worker};
+use std::cell::Cell;
+use std::collections::hash_map::DefaultHasher;
+use std::fmt;
+use std::hash::Hasher;
+use std::io;
+use std::mem;
+use std::ptr;
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::sync::{Arc, Mutex, Once};
+use std::thread;
+use std::usize;
+
/// Thread builder used for customization via
/// [`ThreadPoolBuilder::spawn_handler`](struct.ThreadPoolBuilder.html#method.spawn_handler).
pub struct ThreadBuilder {
    // Thread name supplied by the pool builder, if any.
    name: Option<String>,
    // Requested stack size, if any.
    stack_size: Option<usize>,
    // This thread's own local job deque.
    worker: Worker<JobRef>,
    // Stealer for this thread's dedicated broadcast queue (paired up in
    // `Registry::new`).
    stealer: Stealer<JobRef>,
    // The registry this worker belongs to.
    registry: Arc<Registry>,
    // Index of this thread within `0..num_threads`.
    index: usize,
}
+
+impl ThreadBuilder {
+    /// Gets the index of this thread in the pool, within `0..num_threads`.
+    pub fn index(&self) -> usize {
+        self.index
+    }
+
+    /// Gets the string that was specified by `ThreadPoolBuilder::name()`.
+    pub fn name(&self) -> Option<&str> {
+        self.name.as_deref()
+    }
+
+    /// Gets the value that was specified by `ThreadPoolBuilder::stack_size()`.
+    pub fn stack_size(&self) -> Option<usize> {
+        self.stack_size
+    }
+
+    /// Executes the main loop for this thread. This will not return until the
+    /// thread pool is dropped.
+    pub fn run(self) {
+        unsafe { main_loop(self) }
+    }
+}
+
+impl fmt::Debug for ThreadBuilder {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("ThreadBuilder")
+            .field("pool", &self.registry.id())
+            .field("index", &self.index)
+            .field("name", &self.name)
+            .field("stack_size", &self.stack_size)
+            .finish()
+    }
+}
+
/// Generalized trait for spawning a thread in the `Registry`.
///
/// This trait is pub-in-private -- E0445 forces us to make it public,
/// but we don't actually want to expose these details in the API.
///
/// Implemented below by `DefaultSpawn` (standard `std::thread`) and
/// `CustomSpawn` (user callback).
pub trait ThreadSpawn {
    private_decl! {}

    /// Spawn a thread with the `ThreadBuilder` parameters, and then
    /// call `ThreadBuilder::run()`.
    fn spawn(&mut self, thread: ThreadBuilder) -> io::Result<()>;
}
+
+/// Spawns a thread in the "normal" way with `std::thread::Builder`.
+///
+/// This type is pub-in-private -- E0445 forces us to make it public,
+/// but we don't actually want to expose these details in the API.
+#[derive(Debug, Default)]
+pub struct DefaultSpawn;
+
+impl ThreadSpawn for DefaultSpawn {
+    private_impl! {}
+
+    fn spawn(&mut self, thread: ThreadBuilder) -> io::Result<()> {
+        let mut b = thread::Builder::new();
+        if let Some(name) = thread.name() {
+            b = b.name(name.to_owned());
+        }
+        if let Some(stack_size) = thread.stack_size() {
+            b = b.stack_size(stack_size);
+        }
+        b.spawn(|| thread.run())?;
+        Ok(())
+    }
+}
+
/// Spawns a thread with a user's custom callback.
///
/// This type is pub-in-private -- E0445 forces us to make it public,
/// but we don't actually want to expose these details in the API.
#[derive(Debug)]
pub struct CustomSpawn<F>(F);

impl<F> CustomSpawn<F>
where
    F: FnMut(ThreadBuilder) -> io::Result<()>,
{
    /// Wraps the user-supplied spawn callback.
    pub(super) fn new(spawn: F) -> Self {
        CustomSpawn(spawn)
    }
}
+
impl<F> ThreadSpawn for CustomSpawn<F>
where
    F: FnMut(ThreadBuilder) -> io::Result<()>,
{
    private_impl! {}

    /// Delegates directly to the wrapped callback.
    #[inline]
    fn spawn(&mut self, thread: ThreadBuilder) -> io::Result<()> {
        (self.0)(thread)
    }
}
+
pub(super) struct Registry {
    // Handle for delivering debug-log events (see `log.rs`).
    logger: Logger,
    // Per-worker bookkeeping; its length is the pool's thread count.
    thread_infos: Vec<ThreadInfo>,
    // Coordinates idle workers going to sleep / being woken.
    sleep: Sleep,
    // Global queue for jobs injected from outside the pool.
    injected_jobs: Injector<JobRef>,
    // One dedicated queue per worker, used by `inject_broadcast`.
    broadcasts: Mutex<Vec<Worker<JobRef>>>,
    // User callbacks taken from the builder, if configured.
    panic_handler: Option<Box<PanicHandler>>,
    start_handler: Option<Box<StartHandler>>,
    exit_handler: Option<Box<ExitHandler>>,

    // When this latch reaches 0, it means that all work on this
    // registry must be complete. This is ensured in the following ways:
    //
    // - if this is the global registry, there is a ref-count that never
    //   gets released.
    // - if this is a user-created thread-pool, then so long as the thread-pool
    //   exists, it holds a reference.
    // - when we inject a "blocking job" into the registry with `ThreadPool::install()`,
    //   no adjustment is needed; the `ThreadPool` holds the reference, and since we won't
    //   return until the blocking job is complete, that ref will continue to be held.
    // - when `join()` or `scope()` is invoked, similarly, no adjustments are needed.
    //   These are always owned by some other job (e.g., one injected by `ThreadPool::install()`)
    //   and that job will keep the pool alive.
    terminate_count: AtomicUsize,
}
+
+/// ////////////////////////////////////////////////////////////////////////
+/// Initialization
+
// The lazily-initialized global registry. Written only inside
// `THE_REGISTRY_SET.call_once` (see `set_global_registry`), which is what
// makes the `static mut` access pattern tolerable -- NOTE(review): relies
// on all writers going through that `Once`.
static mut THE_REGISTRY: Option<Arc<Registry>> = None;
static THE_REGISTRY_SET: Once = Once::new();
+
/// Starts the worker threads (if that has not already happened). If
/// initialization has not already occurred, use the default
/// configuration.
pub(super) fn global_registry() -> &'static Arc<Registry> {
    set_global_registry(default_global_registry)
        // If the `Once` already fired, read whatever a previous
        // initialization installed; `None` means that attempt failed.
        .or_else(|err| unsafe { THE_REGISTRY.as_ref().ok_or(err) })
        .expect("The global thread pool has not been initialized.")
}
+
/// Starts the worker threads (if that has not already happened) with
/// the given builder.
///
/// Fails with `GlobalPoolAlreadyInitialized` if someone else initialized
/// the global pool first (see `set_global_registry`).
pub(super) fn init_global_registry<S>(
    builder: ThreadPoolBuilder<S>,
) -> Result<&'static Arc<Registry>, ThreadPoolBuildError>
where
    S: ThreadSpawn,
{
    set_global_registry(|| Registry::new(builder))
}
+
/// Starts the worker threads (if that has not already happened)
/// by creating a registry with the given callback.
fn set_global_registry<F>(registry: F) -> Result<&'static Arc<Registry>, ThreadPoolBuildError>
where
    F: FnOnce() -> Result<Arc<Registry>, ThreadPoolBuildError>,
{
    // If the closure below never runs (because the `Once` already fired),
    // this default error is what the caller observes.
    let mut result = Err(ThreadPoolBuildError::new(
        ErrorKind::GlobalPoolAlreadyInitialized,
    ));

    THE_REGISTRY_SET.call_once(|| {
        // `call_once` guarantees this body runs at most once, so this is
        // the only writer of `THE_REGISTRY` -- NOTE(review): readers in
        // `global_registry` only run after the `Once` completes.
        result = registry()
            .map(|registry: Arc<Registry>| unsafe { &*THE_REGISTRY.get_or_insert(registry) })
    });

    result
}
+
/// Builds the default global registry, falling back to a single
/// current-thread "pool" on targets where spawning threads is reported
/// as unsupported.
fn default_global_registry() -> Result<Arc<Registry>, ThreadPoolBuildError> {
    let result = Registry::new(ThreadPoolBuilder::new());

    // If we're running in an environment that doesn't support threads at all, we can fall back to
    // using the current thread alone. This is crude, and probably won't work for non-blocking
    // calls like `spawn` or `broadcast_spawn`, but a lot of stuff does work fine.
    //
    // Notably, this allows current WebAssembly targets to work even though their threading support
    // is stubbed out, and we won't have to change anything if they do add real threading.
    let unsupported = matches!(&result, Err(e) if e.is_unsupported());
    if unsupported && WorkerThread::current().is_null() {
        let builder = ThreadPoolBuilder::new()
            .num_threads(1)
            .spawn_handler(|thread| {
                // Rather than starting a new thread, we're just taking over the current thread
                // *without* running the main loop, so we can still return from here.
                // The WorkerThread is leaked, but we never shutdown the global pool anyway.
                let worker_thread = Box::leak(Box::new(WorkerThread::from(thread)));
                let registry = &*worker_thread.registry;
                let index = worker_thread.index;

                unsafe {
                    WorkerThread::set_current(worker_thread);

                    // let registry know we are ready to do work
                    Latch::set(&registry.thread_infos[index].primed);
                }

                Ok(())
            });

        let fallback_result = Registry::new(builder);
        if fallback_result.is_ok() {
            return fallback_result;
        }
    }

    // Otherwise report the original outcome (success, or the original error).
    result
}
+
// Drop guard that terminates the registry on unwind or early return;
// defused with `mem::forget` on the success path (see `Registry::new`).
struct Terminator<'a>(&'a Arc<Registry>);

impl<'a> Drop for Terminator<'a> {
    fn drop(&mut self) {
        self.0.terminate()
    }
}
+
+impl Registry {
    /// Builds a registry and spawns its worker threads.
    ///
    /// The requested thread count is clamped to `crate::max_num_threads()`.
    /// If spawning any thread fails, the already-started threads are told
    /// to terminate (via the `Terminator` drop guard) and the I/O error is
    /// returned.
    pub(super) fn new<S>(
        mut builder: ThreadPoolBuilder<S>,
    ) -> Result<Arc<Self>, ThreadPoolBuildError>
    where
        S: ThreadSpawn,
    {
        // Soft-limit the number of threads that we can actually support.
        let n_threads = Ord::min(builder.get_num_threads(), crate::max_num_threads());

        let breadth_first = builder.get_breadth_first();

        // Local deques: FIFO ordering in breadth-first mode, LIFO otherwise.
        let (workers, stealers): (Vec<_>, Vec<_>) = (0..n_threads)
            .map(|_| {
                let worker = if breadth_first {
                    Worker::new_fifo()
                } else {
                    Worker::new_lifo()
                };

                let stealer = worker.stealer();
                (worker, stealer)
            })
            .unzip();

        // Per-thread broadcast queues: the registry keeps the `Worker`
        // halves, and each thread receives its own `Stealer` below.
        let (broadcasts, broadcast_stealers): (Vec<_>, Vec<_>) = (0..n_threads)
            .map(|_| {
                let worker = Worker::new_fifo();
                let stealer = worker.stealer();
                (worker, stealer)
            })
            .unzip();

        let logger = Logger::new(n_threads);
        let registry = Arc::new(Registry {
            logger: logger.clone(),
            thread_infos: stealers.into_iter().map(ThreadInfo::new).collect(),
            sleep: Sleep::new(logger, n_threads),
            injected_jobs: Injector::new(),
            broadcasts: Mutex::new(broadcasts),
            terminate_count: AtomicUsize::new(1),
            panic_handler: builder.take_panic_handler(),
            start_handler: builder.take_start_handler(),
            exit_handler: builder.take_exit_handler(),
        });

        // If we return early or panic, make sure to terminate existing threads.
        let t1000 = Terminator(&registry);

        for (index, (worker, stealer)) in workers.into_iter().zip(broadcast_stealers).enumerate() {
            let thread = ThreadBuilder {
                name: builder.get_thread_name(index),
                stack_size: builder.get_stack_size(),
                registry: Arc::clone(&registry),
                worker,
                stealer,
                index,
            };
            if let Err(e) = builder.get_spawn_handler().spawn(thread) {
                return Err(ThreadPoolBuildError::new(ErrorKind::IOError(e)));
            }
        }

        // Returning normally now, without termination.
        mem::forget(t1000);

        Ok(registry)
    }
+
    /// Returns the registry of the current worker thread, or the global
    /// registry when called from outside any worker thread.
    pub(super) fn current() -> Arc<Registry> {
        unsafe {
            let worker_thread = WorkerThread::current();
            let registry = if worker_thread.is_null() {
                global_registry()
            } else {
                &(*worker_thread).registry
            };
            Arc::clone(registry)
        }
    }

    /// Returns the number of threads in the current registry.  This
    /// is better than `Registry::current().num_threads()` because it
    /// avoids incrementing the `Arc`.
    pub(super) fn current_num_threads() -> usize {
        unsafe {
            let worker_thread = WorkerThread::current();
            if worker_thread.is_null() {
                global_registry().num_threads()
            } else {
                (*worker_thread).registry.num_threads()
            }
        }
    }

    /// Returns the current `WorkerThread` if it's part of this `Registry`.
    pub(super) fn current_thread(&self) -> Option<&WorkerThread> {
        unsafe {
            let worker = WorkerThread::current().as_ref()?;
            if worker.registry().id() == self.id() {
                Some(worker)
            } else {
                None
            }
        }
    }

    /// Returns an opaque identifier for this registry.
    pub(super) fn id(&self) -> RegistryId {
        // We can rely on `self` not to change since we only ever create
        // registries that are boxed up in an `Arc` (see `new()` above).
        RegistryId {
            addr: self as *const Self as usize,
        }
    }

    /// Forwards a lazily-constructed event to this registry's logger.
    #[inline]
    pub(super) fn log(&self, event: impl FnOnce() -> crate::log::Event) {
        self.logger.log(event)
    }

    /// Number of worker threads owned by this registry.
    pub(super) fn num_threads(&self) -> usize {
        self.thread_infos.len()
    }
+
    /// Runs `f`, halting any unwinding panic at this boundary.
    ///
    /// A caught panic is handed to the configured panic handler; if no
    /// handler is set, or the handler itself panics, the process aborts
    /// (via the `AbortIfPanic` guard).
    pub(super) fn catch_unwind(&self, f: impl FnOnce()) {
        if let Err(err) = unwind::halt_unwinding(f) {
            // If there is no handler, or if that handler itself panics, then we abort.
            let abort_guard = unwind::AbortIfPanic;
            if let Some(ref handler) = self.panic_handler {
                handler(err);
                mem::forget(abort_guard);
            }
        }
    }

    /// Waits for the worker threads to get up and running.  This is
    /// meant to be used for benchmarking purposes, primarily, so that
    /// you can get more consistent numbers by having everything
    /// "ready to go".
    pub(super) fn wait_until_primed(&self) {
        for info in &self.thread_infos {
            info.primed.wait();
        }
    }

    /// Waits for the worker threads to stop. This is used for testing
    /// -- so we can check that termination actually works.
    #[cfg(test)]
    pub(super) fn wait_until_stopped(&self) {
        for info in &self.thread_infos {
            info.stopped.wait();
        }
    }
+
    /// ////////////////////////////////////////////////////////////////////////
    /// MAIN LOOP
    ///
    /// So long as all of the worker threads are hanging out in their
    /// top-level loop, there is no work to be done.

    /// Push a job into the given `registry`. If we are running on a
    /// worker thread for the registry, this will push onto the
    /// deque. Else, it will inject from the outside (which is slower).
    pub(super) fn inject_or_push(&self, job_ref: JobRef) {
        let worker_thread = WorkerThread::current();
        unsafe {
            if !worker_thread.is_null() && (*worker_thread).registry().id() == self.id() {
                (*worker_thread).push(job_ref);
            } else {
                self.inject(job_ref);
            }
        }
    }

    /// Push a job into the "external jobs" queue; it will be taken by
    /// whatever worker has nothing to do. Use this if you know that
    /// you are not on a worker of this registry.
    pub(super) fn inject(&self, injected_job: JobRef) {
        self.log(|| JobsInjected { count: 1 });

        // It should not be possible for `state.terminate` to be true
        // here. It is only set to true when the user creates (and
        // drops) a `ThreadPool`; and, in that case, they cannot be
        // calling `inject()` later, since they dropped their
        // `ThreadPool`.
        debug_assert_ne!(
            self.terminate_count.load(Ordering::Acquire),
            0,
            "inject() sees state.terminate as true"
        );

        let queue_was_empty = self.injected_jobs.is_empty();

        self.injected_jobs.push(injected_job);
        // Wake workers as needed; `usize::MAX` appears to stand in for
        // "no particular source worker" -- NOTE(review): confirm against
        // `Sleep::new_injected_jobs`.
        self.sleep.new_injected_jobs(usize::MAX, 1, queue_was_empty);
    }

    /// Whether any externally-injected jobs are waiting in the global queue.
    fn has_injected_job(&self) -> bool {
        !self.injected_jobs.is_empty()
    }

    /// Attempts to steal one job from the global injector queue on behalf
    /// of worker `worker_index`; returns `None` when the queue is empty.
    fn pop_injected_job(&self, worker_index: usize) -> Option<JobRef> {
        loop {
            match self.injected_jobs.steal() {
                Steal::Success(job) => {
                    self.log(|| JobUninjected {
                        worker: worker_index,
                    });
                    return Some(job);
                }
                Steal::Empty => return None,
                // `Retry` signals transient contention; just try again.
                Steal::Retry => {}
            }
        }
    }
+
+    /// Push a job into each thread's own "external jobs" queue; it will be
+    /// executed only on that thread, when it has nothing else to do locally,
+    /// before it tries to steal other work.
+    ///
+    /// **Panics** if not given exactly as many jobs as there are threads.
+    pub(super) fn inject_broadcast(&self, injected_jobs: impl ExactSizeIterator<Item = JobRef>) {
+        assert_eq!(self.num_threads(), injected_jobs.len());
+        self.log(|| JobBroadcast {
+            count: self.num_threads(),
+        });
+        {
+            // Hold the lock for the whole push phase so each worker's
+            // broadcast queue receives exactly one of the jobs.
+            let broadcasts = self.broadcasts.lock().unwrap();
+
+            // It should not be possible for `state.terminate` to be true
+            // here. It is only set to true when the user creates (and
+            // drops) a `ThreadPool`; and, in that case, they cannot be
+            // calling `inject_broadcast()` later, since they dropped their
+            // `ThreadPool`.
+            debug_assert_ne!(
+                self.terminate_count.load(Ordering::Acquire),
+                0,
+                "inject_broadcast() sees state.terminate as true"
+            );
+
+            assert_eq!(broadcasts.len(), injected_jobs.len());
+            for (worker, job_ref) in broadcasts.iter().zip(injected_jobs) {
+                worker.push(job_ref);
+            }
+        }
+        // Wake every worker so it notices its new broadcast job.
+        // NOTE(review): this reuses the latch-set notification path --
+        // confirm against the sleep module.
+        for i in 0..self.num_threads() {
+            self.sleep.notify_worker_latch_is_set(i);
+        }
+    }
+
+    /// If already in a worker-thread of this registry, just execute `op`.
+    /// Otherwise, inject `op` in this thread-pool. Either way, block until `op`
+    /// completes and return its return value. If `op` panics, that panic will
+    /// be propagated as well.  The second argument indicates `true` if injection
+    /// was performed, `false` if executed directly.
+    pub(super) fn in_worker<OP, R>(&self, op: OP) -> R
+    where
+        OP: FnOnce(&WorkerThread, bool) -> R + Send,
+        R: Send,
+    {
+        unsafe {
+            let worker_thread = WorkerThread::current();
+            if worker_thread.is_null() {
+                // Not on any worker thread: inject and block on a latch.
+                self.in_worker_cold(op)
+            } else if (*worker_thread).registry().id() != self.id() {
+                // On a worker thread, but of a *different* registry:
+                // inject here and keep that registry's worker busy while
+                // waiting.
+                self.in_worker_cross(&*worker_thread, op)
+            } else {
+                // Perfectly valid to give them a `&T`: this is the
+                // current thread, so we know the data structure won't be
+                // invalidated until we return.
+                op(&*worker_thread, false)
+            }
+        }
+    }
+
+    #[cold]
+    unsafe fn in_worker_cold<OP, R>(&self, op: OP) -> R
+    where
+        OP: FnOnce(&WorkerThread, bool) -> R + Send,
+        R: Send,
+    {
+        // One latch per external (non-worker) thread, reused across calls;
+        // hence the `wait_and_reset` below.
+        thread_local!(static LOCK_LATCH: LockLatch = LockLatch::new());
+
+        LOCK_LATCH.with(|l| {
+            // This thread isn't a member of *any* thread pool, so just block.
+            debug_assert!(WorkerThread::current().is_null());
+            let job = StackJob::new(
+                |injected| {
+                    // Runs on some worker of this registry, so `current()`
+                    // must be non-null there.
+                    let worker_thread = WorkerThread::current();
+                    assert!(injected && !worker_thread.is_null());
+                    op(&*worker_thread, true)
+                },
+                LatchRef::new(l),
+            );
+            self.inject(job.as_job_ref());
+            job.latch.wait_and_reset(); // Make sure we can use the same latch again next time.
+
+            // flush accumulated logs as we exit the thread
+            self.logger.log(|| Flush);
+
+            job.into_result()
+        })
+    }
+
+    #[cold]
+    unsafe fn in_worker_cross<OP, R>(&self, current_thread: &WorkerThread, op: OP) -> R
+    where
+        OP: FnOnce(&WorkerThread, bool) -> R + Send,
+        R: Send,
+    {
+        // This thread is a member of a different pool, so let it process
+        // other work while waiting for this `op` to complete.
+        debug_assert!(current_thread.registry().id() != self.id());
+        // NOTE(review): a "cross" spin-latch, since the waiting thread
+        // belongs to a different registry than the one that will run `op`
+        // -- confirm semantics against `SpinLatch::cross`.
+        let latch = SpinLatch::cross(current_thread);
+        let job = StackJob::new(
+            |injected| {
+                let worker_thread = WorkerThread::current();
+                assert!(injected && !worker_thread.is_null());
+                op(&*worker_thread, true)
+            },
+            latch,
+        );
+        self.inject(job.as_job_ref());
+        // Work-steal in our own registry until the job sets the latch.
+        current_thread.wait_until(&job.latch);
+        job.into_result()
+    }
+
+    /// Increments the terminate counter. This increment should be
+    /// balanced by a call to `terminate`, which will decrement. This
+    /// is used when spawning asynchronous work, which needs to
+    /// prevent the registry from terminating so long as it is active.
+    ///
+    /// Note that blocking functions such as `join` and `scope` do not
+    /// need to concern themselves with this fn; their context is
+    /// responsible for ensuring the current thread-pool will not
+    /// terminate until they return.
+    ///
+    /// The global thread-pool always has an outstanding reference
+    /// (the initial one). Custom thread-pools have one outstanding
+    /// reference that is dropped when the `ThreadPool` is dropped:
+    /// since installing the thread-pool blocks until any joins/scopes
+    /// complete, this ensures that joins/scopes are covered.
+    ///
+    /// The exception is `::spawn()`, which can create a job outside
+    /// of any blocking scope. In that case, the job itself holds a
+    /// terminate count and is responsible for invoking `terminate()`
+    /// when finished.
+    pub(super) fn increment_terminate_count(&self) {
+        let previous = self.terminate_count.fetch_add(1, Ordering::AcqRel);
+        // `previous == 0` would mean the registry already terminated.
+        debug_assert!(previous != 0, "registry ref count incremented from zero");
+        // Use the `usize::MAX` associated constant rather than the
+        // soft-deprecated `std::usize::MAX` legacy path, matching its use
+        // elsewhere in this file (see `inject()` above).
+        assert!(previous != usize::MAX, "overflow in registry ref count");
+    }
+
+    /// Signals that the thread-pool which owns this registry has been
+    /// dropped. The worker threads will gradually terminate, once any
+    /// extant work is completed.
+    pub(super) fn terminate(&self) {
+        // `fetch_sub` returning 1 means this was the last outstanding
+        // terminate-count reference (see `increment_terminate_count`):
+        // set every worker's terminate latch so it can wind down.
+        if self.terminate_count.fetch_sub(1, Ordering::AcqRel) == 1 {
+            for (i, thread_info) in self.thread_infos.iter().enumerate() {
+                unsafe { CountLatch::set_and_tickle_one(&thread_info.terminate, self, i) };
+            }
+        }
+    }
+
+    /// Notify the worker that the latch they are sleeping on has been "set".
+    /// (Thin forwarding wrapper around the sleep module.)
+    pub(super) fn notify_worker_latch_is_set(&self, target_worker_index: usize) {
+        self.sleep.notify_worker_latch_is_set(target_worker_index);
+    }
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
+pub(super) struct RegistryId {
+    // Identity token for a registry; compared in `Registry::in_worker` to
+    // detect cross-registry calls. Presumably the address of the registry
+    // allocation -- set where `RegistryId` is constructed (not visible here).
+    addr: usize,
+}
+
+/// Per-worker-thread bookkeeping kept by the registry (one entry per
+/// worker, indexed by the worker's index).
+struct ThreadInfo {
+    /// Latch set once thread has started and we are entering into the
+    /// main loop. Used to wait for worker threads to become primed,
+    /// primarily of interest for benchmarking.
+    primed: LockLatch,
+
+    /// Latch is set once worker thread has completed. Used to wait
+    /// until workers have stopped; only used for tests.
+    stopped: LockLatch,
+
+    /// The latch used to signal that terminated has been requested.
+    /// This latch is *set* by the `terminate` method on the
+    /// `Registry`, once the registry's main "terminate" counter
+    /// reaches zero.
+    ///
+    /// NB. We use a `CountLatch` here because it has no lifetimes and is
+    /// meant for async use, but the count never gets higher than one.
+    terminate: CountLatch,
+
+    /// the "stealer" half of the worker's deque
+    stealer: Stealer<JobRef>,
+}
+
+impl ThreadInfo {
+    /// Creates the bookkeeping entry for one worker, taking the "stealer"
+    /// half of that worker's deque.
+    fn new(stealer: Stealer<JobRef>) -> ThreadInfo {
+        ThreadInfo {
+            primed: LockLatch::new(),
+            stopped: LockLatch::new(),
+            terminate: CountLatch::new(),
+            stealer,
+        }
+    }
+}
+
+/// ////////////////////////////////////////////////////////////////////////
+/// WorkerThread identifiers
+
+/// Per-thread state for a single worker: allocated on the worker thread's
+/// own stack at startup and published through `WORKER_THREAD_STATE` below.
+pub(super) struct WorkerThread {
+    /// the "worker" half of our local deque
+    worker: Worker<JobRef>,
+
+    /// the "stealer" half of the worker's broadcast deque
+    stealer: Stealer<JobRef>,
+
+    /// local queue used for `spawn_fifo` indirection
+    fifo: JobFifo,
+
+    /// this worker's index within its registry (`0..num_threads`)
+    index: usize,
+
+    /// A weak random number generator (used to randomize steal victims).
+    rng: XorShift64Star,
+
+    registry: Arc<Registry>,
+}
+
+// This is a bit sketchy, but basically: the WorkerThread is
+// allocated on the stack of the worker on entry and stored into this
+// thread local variable. So it will remain valid at least until the
+// worker is fully unwound. Using an unsafe pointer avoids the need
+// for a RefCell<T> etc.
+thread_local! {
+    // `const` block form: statically initialized, no lazy-init check.
+    static WORKER_THREAD_STATE: Cell<*const WorkerThread> = const { Cell::new(ptr::null()) };
+}
+
+impl From<ThreadBuilder> for WorkerThread {
+    /// Builds the worker-thread state from the registry-provided
+    /// `ThreadBuilder`, adding the purely thread-local pieces (the
+    /// `spawn_fifo` queue and the steal-victim RNG).
+    fn from(thread: ThreadBuilder) -> Self {
+        Self {
+            worker: thread.worker,
+            stealer: thread.stealer,
+            fifo: JobFifo::new(),
+            index: thread.index,
+            rng: XorShift64Star::new(),
+            registry: thread.registry,
+        }
+    }
+}
+
+impl Drop for WorkerThread {
+    fn drop(&mut self) {
+        // Undo `set_current`
+        WORKER_THREAD_STATE.with(|t| {
+            // Guard against clearing some other registration: the slot
+            // must still point at *this* WorkerThread.
+            assert!(t.get().eq(&(self as *const _)));
+            t.set(ptr::null());
+        });
+    }
+}
+
+impl WorkerThread {
+    /// Gets the `WorkerThread` index for the current thread; returns
+    /// NULL if this is not a worker thread. This pointer is valid
+    /// anywhere on the current thread.
+    #[inline]
+    pub(super) fn current() -> *const WorkerThread {
+        WORKER_THREAD_STATE.with(Cell::get)
+    }
+
+    /// Sets `self` as the worker thread index for the current thread.
+    /// This is done during worker thread startup.
+    unsafe fn set_current(thread: *const WorkerThread) {
+        WORKER_THREAD_STATE.with(|t| {
+            // A thread may only ever be registered once.
+            assert!(t.get().is_null());
+            t.set(thread);
+        });
+    }
+
+    /// Returns the registry that owns this worker thread.
+    #[inline]
+    pub(super) fn registry(&self) -> &Arc<Registry> {
+        &self.registry
+    }
+
+    /// Forwards a log event to the owning registry's logger.
+    #[inline]
+    pub(super) fn log(&self, event: impl FnOnce() -> crate::log::Event) {
+        self.registry.logger.log(event)
+    }
+
+    /// Our index amongst the worker threads (ranges from `0..self.num_threads()`).
+    #[inline]
+    pub(super) fn index(&self) -> usize {
+        self.index
+    }
+
+    /// Pushes a job onto this worker's own deque.
+    #[inline]
+    pub(super) unsafe fn push(&self, job: JobRef) {
+        self.log(|| JobPushed { worker: self.index });
+        // Read emptiness *before* the push so the sleep module can tell
+        // whether idle workers may need to be awoken.
+        let queue_was_empty = self.worker.is_empty();
+        self.worker.push(job);
+        self.registry
+            .sleep
+            .new_internal_jobs(self.index, 1, queue_was_empty);
+    }
+
+    /// Pushes a job through the `spawn_fifo` indirection queue, giving it
+    /// FIFO order relative to other `push_fifo` jobs from this thread.
+    #[inline]
+    pub(super) unsafe fn push_fifo(&self, job: JobRef) {
+        self.push(self.fifo.push(job));
+    }
+
+    #[inline]
+    pub(super) fn local_deque_is_empty(&self) -> bool {
+        self.worker.is_empty()
+    }
+
+    /// Attempts to obtain a "local" job -- typically this means
+    /// popping from the top of the stack, though if we are configured
+    /// for breadth-first execution, it would mean dequeuing from the
+    /// bottom.
+    #[inline]
+    pub(super) fn take_local_job(&self) -> Option<JobRef> {
+        let popped_job = self.worker.pop();
+
+        if popped_job.is_some() {
+            self.log(|| JobPopped { worker: self.index });
+            return popped_job;
+        }
+
+        // Nothing in the local deque: try the broadcast queue targeted at
+        // this worker, retrying on steal contention.
+        loop {
+            match self.stealer.steal() {
+                Steal::Success(job) => return Some(job),
+                Steal::Empty => return None,
+                Steal::Retry => {}
+            }
+        }
+    }
+
+    /// Returns `true` if there is a pending broadcast job for this worker
+    /// or a job in the registry-wide injector queue.
+    fn has_injected_job(&self) -> bool {
+        !self.stealer.is_empty() || self.registry.has_injected_job()
+    }
+
+    /// Wait until the latch is set. Try to keep busy by popping and
+    /// stealing tasks as necessary.
+    #[inline]
+    pub(super) unsafe fn wait_until<L: AsCoreLatch + ?Sized>(&self, latch: &L) {
+        let latch = latch.as_core_latch();
+        if !latch.probe() {
+            self.wait_until_cold(latch);
+        }
+    }
+
+    #[cold]
+    unsafe fn wait_until_cold(&self, latch: &CoreLatch) {
+        // the code below should swallow all panics and hence never
+        // unwind; but if something goes wrong, we want to abort,
+        // because otherwise other code in rayon may assume that the
+        // latch has been signaled, and that can lead to random memory
+        // accesses, which would be *very bad*
+        let abort_guard = unwind::AbortIfPanic;
+
+        let mut idle_state = self.registry.sleep.start_looking(self.index, latch);
+        while !latch.probe() {
+            if let Some(job) = self.find_work() {
+                self.registry.sleep.work_found(idle_state);
+                self.execute(job);
+                // Executing a job may itself block; re-enter idle state
+                // before looking again.
+                idle_state = self.registry.sleep.start_looking(self.index, latch);
+            } else {
+                self.registry
+                    .sleep
+                    .no_work_found(&mut idle_state, latch, || self.has_injected_job())
+            }
+        }
+
+        // If we were sleepy, we are not anymore. We "found work" --
+        // whatever the surrounding thread was doing before it had to
+        // wait.
+        self.registry.sleep.work_found(idle_state);
+
+        self.log(|| ThreadSawLatchSet {
+            worker: self.index,
+            latch_addr: latch.addr(),
+        });
+        mem::forget(abort_guard); // successful execution, do not abort
+    }
+
+    fn find_work(&self) -> Option<JobRef> {
+        // Try to find some work to do. We give preference first
+        // to things in our local deque, then in other workers
+        // deques, and finally to injected jobs from the
+        // outside. The idea is to finish what we started before
+        // we take on something new.
+        self.take_local_job()
+            .or_else(|| self.steal())
+            .or_else(|| self.registry.pop_injected_job(self.index))
+    }
+
+    /// Find and execute one pending job from any source; report whether
+    /// one ran.
+    pub(super) fn yield_now(&self) -> Yield {
+        match self.find_work() {
+            Some(job) => unsafe {
+                self.execute(job);
+                Yield::Executed
+            },
+            None => Yield::Idle,
+        }
+    }
+
+    /// Like `yield_now`, but only considers this worker's own local jobs.
+    pub(super) fn yield_local(&self) -> Yield {
+        match self.take_local_job() {
+            Some(job) => unsafe {
+                self.execute(job);
+                Yield::Executed
+            },
+            None => Yield::Idle,
+        }
+    }
+
+    #[inline]
+    pub(super) unsafe fn execute(&self, job: JobRef) {
+        job.execute();
+    }
+
+    /// Try to steal a single job and return it.
+    ///
+    /// This should only be done as a last resort, when there is no
+    /// local work to do.
+    fn steal(&self) -> Option<JobRef> {
+        // we only steal when we don't have any work to do locally
+        debug_assert!(self.local_deque_is_empty());
+
+        // otherwise, try to steal
+        let thread_infos = &self.registry.thread_infos.as_slice();
+        let num_threads = thread_infos.len();
+        if num_threads <= 1 {
+            return None;
+        }
+
+        loop {
+            let mut retry = false;
+            // Start the sweep at a random victim to spread contention.
+            let start = self.rng.next_usize(num_threads);
+            let job = (start..num_threads)
+                .chain(0..start)
+                .filter(move |&i| i != self.index)
+                .find_map(|victim_index| {
+                    let victim = &thread_infos[victim_index];
+                    match victim.stealer.steal() {
+                        Steal::Success(job) => {
+                            self.log(|| JobStolen {
+                                worker: self.index,
+                                victim: victim_index,
+                            });
+                            Some(job)
+                        }
+                        Steal::Empty => None,
+                        Steal::Retry => {
+                            // Remember the contention; if the whole sweep
+                            // finds nothing, we loop and try all victims
+                            // again.
+                            retry = true;
+                            None
+                        }
+                    }
+                });
+            if job.is_some() || !retry {
+                return job;
+            }
+        }
+    }
+}
+
+/// ////////////////////////////////////////////////////////////////////////
+
+/// The body of every worker thread: registers the thread as the current
+/// worker, signals readiness, runs the user `start_handler`, processes
+/// work until the terminate latch is set, then signals shutdown and runs
+/// the user `exit_handler`.
+///
+/// Safety: must be called on a newly spawned thread that has never been
+/// registered as a worker (`WorkerThread::set_current` asserts this).
+unsafe fn main_loop(thread: ThreadBuilder) {
+    let worker_thread = &WorkerThread::from(thread);
+    WorkerThread::set_current(worker_thread);
+    let registry = &*worker_thread.registry;
+    let index = worker_thread.index;
+
+    // let registry know we are ready to do work
+    Latch::set(&registry.thread_infos[index].primed);
+
+    // Worker threads should not panic. If they do, just abort, as the
+    // internal state of the threadpool is corrupted. Note that if
+    // **user code** panics, we should catch that and redirect.
+    let abort_guard = unwind::AbortIfPanic;
+
+    // Inform a user callback that we started a thread.
+    if let Some(ref handler) = registry.start_handler {
+        registry.catch_unwind(|| handler(index));
+    }
+
+    let my_terminate_latch = &registry.thread_infos[index].terminate;
+    worker_thread.log(|| ThreadStart {
+        worker: index,
+        terminate_addr: my_terminate_latch.as_core_latch().addr(),
+    });
+    // The main work loop: returns only once the terminate latch has been
+    // set (see `Registry::terminate`).
+    worker_thread.wait_until(my_terminate_latch);
+
+    // Should not be any work left in our queue.
+    debug_assert!(worker_thread.take_local_job().is_none());
+
+    // let registry know we are done
+    Latch::set(&registry.thread_infos[index].stopped);
+
+    // Normal termination, do not abort.
+    mem::forget(abort_guard);
+
+    worker_thread.log(|| ThreadTerminate { worker: index });
+
+    // Inform a user callback that we exited a thread.
+    if let Some(ref handler) = registry.exit_handler {
+        registry.catch_unwind(|| handler(index));
+        // We're already exiting the thread, there's nothing else to do.
+    }
+}
+
+/// If already in a worker-thread, just execute `op`.  Otherwise,
+/// execute `op` in the default thread-pool. Either way, block until
+/// `op` completes and return its return value. If `op` panics, that
+/// panic will be propagated as well.  The second argument indicates
+/// `true` if injection was performed, `false` if executed directly.
+pub(super) fn in_worker<OP, R>(op: OP) -> R
+where
+    OP: FnOnce(&WorkerThread, bool) -> R + Send,
+    R: Send,
+{
+    unsafe {
+        let owner_thread = WorkerThread::current();
+        if !owner_thread.is_null() {
+            // Unlike `Registry::in_worker`, *any* worker thread will do:
+            // no registry-identity check, we just run directly on
+            // whichever pool's worker we happen to be on.
+            // Perfectly valid to give them a `&T`: this is the
+            // current thread, so we know the data structure won't be
+            // invalidated until we return.
+            op(&*owner_thread, false)
+        } else {
+            // Not a worker at all: inject into the global pool and block.
+            global_registry().in_worker(op)
+        }
+    }
+}
+
+/// [xorshift*] is a fast pseudorandom number generator which will
+/// even tolerate weak seeding, as long as it's not zero.
+///
+/// [xorshift*]: https://en.wikipedia.org/wiki/Xorshift#xorshift*
+struct XorShift64Star {
+    // Current PRNG state; kept non-zero (see `new` and the debug assert
+    // in `next`), since zero is a fixed point of the xorshift recurrence.
+    state: Cell<u64>,
+}
+
+impl XorShift64Star {
+    fn new() -> Self {
+        // Any non-zero seed will do -- this uses the hash of a global counter.
+        let mut seed = 0;
+        while seed == 0 {
+            let mut hasher = DefaultHasher::new();
+            static COUNTER: AtomicUsize = AtomicUsize::new(0);
+            hasher.write_usize(COUNTER.fetch_add(1, Ordering::Relaxed));
+            seed = hasher.finish();
+        }
+
+        XorShift64Star {
+            state: Cell::new(seed),
+        }
+    }
+
+    /// Advance the generator and return the next pseudorandom value:
+    /// the xorshift64* recurrence (three shift-xors, then a multiply).
+    fn next(&self) -> u64 {
+        let mut x = self.state.get();
+        debug_assert_ne!(x, 0);
+        x ^= x >> 12;
+        x ^= x << 25;
+        x ^= x >> 27;
+        self.state.set(x);
+        x.wrapping_mul(0x2545_f491_4f6c_dd1d)
+    }
+
+    /// Return a value from `0..n`.
+    ///
+    /// NOTE(review): plain `%` carries a slight modulo bias; acceptable
+    /// here since this only randomizes steal-victim order.
+    fn next_usize(&self, n: usize) -> usize {
+        (self.next() % n as u64) as usize
+    }
+}
diff --git a/rayon/rayon-core/src/scope/mod.rs b/rayon/rayon-core/src/scope/mod.rs
new file mode 100644
index 0000000..f460dd7
--- /dev/null
+++ b/rayon/rayon-core/src/scope/mod.rs
@@ -0,0 +1,865 @@
+//! Methods for custom fork-join scopes, created by the [`scope()`]
+//! and [`in_place_scope()`] functions. These are a more flexible alternative to [`join()`].
+//!
+//! [`scope()`]: fn.scope.html
+//! [`in_place_scope()`]: fn.in_place_scope.html
+//! [`join()`]: ../fn.join.html
+
+use crate::broadcast::BroadcastContext;
+use crate::job::{ArcJob, HeapJob, JobFifo, JobRef};
+use crate::latch::{CountLatch, CountLockLatch, Latch};
+use crate::registry::{global_registry, in_worker, Registry, WorkerThread};
+use crate::unwind;
+use std::any::Any;
+use std::fmt;
+use std::marker::PhantomData;
+use std::mem::ManuallyDrop;
+use std::ptr;
+use std::sync::atomic::{AtomicPtr, Ordering};
+use std::sync::Arc;
+
+#[cfg(test)]
+mod test;
+
+/// Represents a fork-join scope which can be used to spawn any number of tasks.
+/// See [`scope()`] for more information.
+///
+///[`scope()`]: fn.scope.html
+pub struct Scope<'scope> {
+    /// Shared scope state: owning registry, panic slot, completion latch.
+    base: ScopeBase<'scope>,
+}
+
+/// Represents a fork-join scope which can be used to spawn any number of tasks.
+/// Those spawned from the same thread are prioritized in relative FIFO order.
+/// See [`scope_fifo()`] for more information.
+///
+///[`scope_fifo()`]: fn.scope_fifo.html
+pub struct ScopeFifo<'scope> {
+    base: ScopeBase<'scope>,
+    /// FIFO indirection queues giving spawns their per-thread FIFO order
+    /// (presumably one per worker thread -- confirm where `fifos` is
+    /// sized, not visible here).
+    fifos: Vec<JobFifo>,
+}
+
+/// Latch used by a scope to track outstanding jobs and wait for their
+/// completion; which variant is used depends on the kind of thread that
+/// created the scope.
+pub(super) enum ScopeLatch {
+    /// A latch for scopes created on a rayon thread which will participate in work-
+    /// stealing while it waits for completion. This thread is not necessarily part
+    /// of the same registry as the scope itself!
+    Stealing {
+        latch: CountLatch,
+        /// If a worker thread in registry A calls `in_place_scope` on a ThreadPool
+        /// with registry B, when a job completes in a thread of registry B, we may
+        /// need to call `latch.set_and_tickle_one()` to wake the thread in registry A.
+        /// That means we need a reference to registry A (since at that point we will
+        /// only have a reference to registry B), so we stash it here.
+        registry: Arc<Registry>,
+        /// The index of the worker to wake in `registry`
+        worker_index: usize,
+    },
+
+    /// A latch for scopes created on a non-rayon thread which will block to wait.
+    Blocking { latch: CountLockLatch },
+}
+
+struct ScopeBase<'scope> {
+    /// thread registry where `scope()` was executed or where `in_place_scope()`
+    /// should spawn jobs.
+    registry: Arc<Registry>,
+
+    /// if some job panicked, the error is stored here; it will be
+    /// propagated to the one who created the scope
+    /// (kept as a raw `Box` pointer inside an `AtomicPtr`; the swap-in
+    /// logic lives elsewhere, not visible here)
+    panic: AtomicPtr<Box<dyn Any + Send + 'static>>,
+
+    /// latch to track job counts
+    job_completed_latch: ScopeLatch,
+
+    /// You can think of a scope as containing a list of closures to execute,
+    /// all of which outlive `'scope`.  They're not actually required to be
+    /// `Sync`, but it's still safe to let the `Scope` implement `Sync` because
+    /// the closures are only *moved* across threads to be executed.
+    marker: PhantomData<Box<dyn FnOnce(&Scope<'scope>) + Send + Sync + 'scope>>,
+}
+
+/// Creates a "fork-join" scope `s` and invokes the closure with a
+/// reference to `s`. This closure can then spawn asynchronous tasks
+/// into `s`. Those tasks may run asynchronously with respect to the
+/// closure; they may themselves spawn additional tasks into `s`. When
+/// the closure returns, it will block until all tasks that have been
+/// spawned into `s` complete.
+///
+/// `scope()` is a more flexible building block compared to `join()`,
+/// since a loop can be used to spawn any number of tasks without
+/// recursing. However, that flexibility comes at a performance price:
+/// tasks spawned using `scope()` must be allocated onto the heap,
+/// whereas `join()` can make exclusive use of the stack. **Prefer
+/// `join()` (or, even better, parallel iterators) where possible.**
+///
+/// # Example
+///
+/// The Rayon `join()` function launches two closures and waits for them
+/// to stop. One could implement `join()` using a scope like so, although
+/// it would be less efficient than the real implementation:
+///
+/// ```rust
+/// # use rayon_core as rayon;
+/// pub fn join<A,B,RA,RB>(oper_a: A, oper_b: B) -> (RA, RB)
+///     where A: FnOnce() -> RA + Send,
+///           B: FnOnce() -> RB + Send,
+///           RA: Send,
+///           RB: Send,
+/// {
+///     let mut result_a: Option<RA> = None;
+///     let mut result_b: Option<RB> = None;
+///     rayon::scope(|s| {
+///         s.spawn(|_| result_a = Some(oper_a()));
+///         s.spawn(|_| result_b = Some(oper_b()));
+///     });
+///     (result_a.unwrap(), result_b.unwrap())
+/// }
+/// ```
+///
+/// # A note on threading
+///
+/// The closure given to `scope()` executes in the Rayon thread-pool,
+/// as do those given to `spawn()`. This means that you can't access
+/// thread-local variables (well, you can, but they may have
+/// unexpected values).
+///
+/// # Task execution
+///
+/// Task execution potentially starts as soon as `spawn()` is called.
+/// The task will end sometime before `scope()` returns. Note that the
+/// *closure* given to scope may return much earlier. In general
+/// the lifetime of a scope created like `scope(body)` goes something like this:
+///
+/// - Scope begins when `scope(body)` is called
+/// - Scope body `body()` is invoked
+///     - Scope tasks may be spawned
+/// - Scope body returns
+/// - Scope tasks execute, possibly spawning more tasks
+/// - Once all tasks are done, scope ends and `scope()` returns
+///
+/// To see how and when tasks are joined, consider this example:
+///
+/// ```rust
+/// # use rayon_core as rayon;
+/// // point start
+/// rayon::scope(|s| {
+///     s.spawn(|s| { // task s.1
+///         s.spawn(|s| { // task s.1.1
+///             rayon::scope(|t| {
+///                 t.spawn(|_| ()); // task t.1
+///                 t.spawn(|_| ()); // task t.2
+///             });
+///         });
+///     });
+///     s.spawn(|s| { // task s.2
+///     });
+///     // point mid
+/// });
+/// // point end
+/// ```
+///
+/// The various tasks that are run will execute roughly like so:
+///
+/// ```notrust
+/// | (start)
+/// |
+/// | (scope `s` created)
+/// +-----------------------------------------------+ (task s.2)
+/// +-------+ (task s.1)                            |
+/// |       |                                       |
+/// |       +---+ (task s.1.1)                      |
+/// |       |   |                                   |
+/// |       |   | (scope `t` created)               |
+/// |       |   +----------------+ (task t.2)       |
+/// |       |   +---+ (task t.1) |                  |
+/// | (mid) |   |   |            |                  |
+/// :       |   + <-+------------+ (scope `t` ends) |
+/// :       |   |                                   |
+/// |<------+---+-----------------------------------+ (scope `s` ends)
+/// |
+/// | (end)
+/// ```
+///
+/// The point here is that everything spawned into scope `s` will
+/// terminate (at latest) at the same point -- right before the
+/// original call to `rayon::scope` returns. This includes new
+/// subtasks created by other subtasks (e.g., task `s.1.1`). If a new
+/// scope is created (such as `t`), the things spawned into that scope
+/// will be joined before that scope returns, which in turn occurs
+/// before the creating task (task `s.1.1` in this case) finishes.
+///
+/// There is no guaranteed order of execution for spawns in a scope,
+/// given that other threads may steal tasks at any time. However, they
+/// are generally prioritized in a LIFO order on the thread from which
+/// they were spawned. So in this example, absent any stealing, we can
+/// expect `s.2` to execute before `s.1`, and `t.2` before `t.1`. Other
+/// threads always steal from the other end of the deque, like FIFO
+/// order.  The idea is that "recent" tasks are most likely to be fresh
+/// in the local CPU's cache, while other threads can steal older
+/// "stale" tasks.  For an alternate approach, consider
+/// [`scope_fifo()`] instead.
+///
+/// [`scope_fifo()`]: fn.scope_fifo.html
+///
+/// # Accessing stack data
+///
+/// In general, spawned tasks may access stack data in place that
+/// outlives the scope itself. Other data must be fully owned by the
+/// spawned task.
+///
+/// ```rust
+/// # use rayon_core as rayon;
+/// let ok: Vec<i32> = vec![1, 2, 3];
+/// rayon::scope(|s| {
+///     let bad: Vec<i32> = vec![4, 5, 6];
+///     s.spawn(|_| {
+///         // We can access `ok` because it outlives the scope `s`.
+///         println!("ok: {:?}", ok);
+///
+///         // If we just try to use `bad` here, the closure will borrow `bad`
+///         // (because we are just printing it out, and that only requires a
+///         // borrow), which will result in a compilation error. Read on
+///         // for options.
+///         // println!("bad: {:?}", bad);
+///    });
+/// });
+/// ```
+///
+/// As the comments example above suggest, to reference `bad` we must
+/// take ownership of it. One way to do this is to detach the closure
+/// from the surrounding stack frame, using the `move` keyword. This
+/// will cause it to take ownership of *all* the variables it touches,
+/// in this case including both `ok` *and* `bad`:
+///
+/// ```rust
+/// # use rayon_core as rayon;
+/// let ok: Vec<i32> = vec![1, 2, 3];
+/// rayon::scope(|s| {
+///     let bad: Vec<i32> = vec![4, 5, 6];
+///     s.spawn(move |_| {
+///         println!("ok: {:?}", ok);
+///         println!("bad: {:?}", bad);
+///     });
+///
+///     // That closure is fine, but now we can't use `ok` anywhere else,
+///     // since it is owned by the previous task:
+///     // s.spawn(|_| println!("ok: {:?}", ok));
+/// });
+/// ```
+///
+/// While this works, it could be a problem if we want to use `ok` elsewhere.
+/// There are two choices. We can keep the closure as a `move` closure, but
+/// instead of referencing the variable `ok`, we create a shadowed variable that
+/// is a borrow of `ok` and capture *that*:
+///
+/// ```rust
+/// # use rayon_core as rayon;
+/// let ok: Vec<i32> = vec![1, 2, 3];
+/// rayon::scope(|s| {
+///     let bad: Vec<i32> = vec![4, 5, 6];
+///     let ok: &Vec<i32> = &ok; // shadow the original `ok`
+///     s.spawn(move |_| {
+///         println!("ok: {:?}", ok); // captures the shadowed version
+///         println!("bad: {:?}", bad);
+///     });
+///
+///     // Now we too can use the shadowed `ok`, since `&Vec<i32>` references
+///     // can be shared freely. Note that we need a `move` closure here though,
+///     // because otherwise we'd be trying to borrow the shadowed `ok`,
+///     // and that doesn't outlive `scope`.
+///     s.spawn(move |_| println!("ok: {:?}", ok));
+/// });
+/// ```
+///
+/// Another option is not to use the `move` keyword but instead to take ownership
+/// of individual variables:
+///
+/// ```rust
+/// # use rayon_core as rayon;
+/// let ok: Vec<i32> = vec![1, 2, 3];
+/// rayon::scope(|s| {
+///     let bad: Vec<i32> = vec![4, 5, 6];
+///     s.spawn(|_| {
+///         // Transfer ownership of `bad` into a local variable (also named `bad`).
+///         // This will force the closure to take ownership of `bad` from the environment.
+///         let bad = bad;
+///         println!("ok: {:?}", ok); // `ok` is only borrowed.
+///         println!("bad: {:?}", bad); // refers to our local variable, above.
+///     });
+///
+///     s.spawn(|_| println!("ok: {:?}", ok)); // we too can borrow `ok`
+/// });
+/// ```
+///
+/// # Panics
+///
+/// If a panic occurs, either in the closure given to `scope()` or in
+/// any of the spawned jobs, that panic will be propagated and the
+/// call to `scope()` will panic. If multiple panics occur, it is
+/// non-deterministic which of their panic values will propagate.
+/// Regardless, once a task is spawned using `scope.spawn()`, it will
+/// execute, even if the spawning task should later panic. `scope()`
+/// returns once all spawned jobs have completed, and any panics are
+/// propagated at that point.
+pub fn scope<'scope, OP, R>(op: OP) -> R
+where
+    OP: FnOnce(&Scope<'scope>) -> R + Send,
+    R: Send,
+{
+    // Move onto a worker thread (injecting into the global pool if
+    // necessary), build the scope there, and let `complete` run `op` --
+    // per the contract documented above, it waits for all spawned jobs
+    // and propagates any panic (see `ScopeBase::complete`, not visible
+    // here).
+    in_worker(|owner_thread, _| {
+        let scope = Scope::<'scope>::new(Some(owner_thread), None);
+        scope.base.complete(Some(owner_thread), || op(&scope))
+    })
+}
+
+/// Creates a "fork-join" scope `s` with FIFO order, and invokes the
+/// closure with a reference to `s`. This closure can then spawn
+/// asynchronous tasks into `s`. Those tasks may run asynchronously with
+/// respect to the closure; they may themselves spawn additional tasks
+/// into `s`. When the closure returns, it will block until all tasks
+/// that have been spawned into `s` complete.
+///
+/// # Task execution
+///
+/// Tasks in a `scope_fifo()` run similarly to [`scope()`], but there's a
+/// difference in the order of execution. Consider a similar example:
+///
+/// [`scope()`]: fn.scope.html
+///
+/// ```rust
+/// # use rayon_core as rayon;
+/// // point start
+/// rayon::scope_fifo(|s| {
+///     s.spawn_fifo(|s| { // task s.1
+///         s.spawn_fifo(|s| { // task s.1.1
+///             rayon::scope_fifo(|t| {
+///                 t.spawn_fifo(|_| ()); // task t.1
+///                 t.spawn_fifo(|_| ()); // task t.2
+///             });
+///         });
+///     });
+///     s.spawn_fifo(|s| { // task s.2
+///     });
+///     // point mid
+/// });
+/// // point end
+/// ```
+///
+/// The various tasks that are run will execute roughly like so:
+///
+/// ```notrust
+/// | (start)
+/// |
+/// | (FIFO scope `s` created)
+/// +--------------------+ (task s.1)
+/// +-------+ (task s.2) |
+/// |       |            +---+ (task s.1.1)
+/// |       |            |   |
+/// |       |            |   | (FIFO scope `t` created)
+/// |       |            |   +----------------+ (task t.1)
+/// |       |            |   +---+ (task t.2) |
+/// | (mid) |            |   |   |            |
+/// :       |            |   + <-+------------+ (scope `t` ends)
+/// :       |            |   |
+/// |<------+------------+---+ (scope `s` ends)
+/// |
+/// | (end)
+/// ```
+///
+/// Under `scope_fifo()`, the spawns are prioritized in a FIFO order on
+/// the thread from which they were spawned, as opposed to `scope()`'s
+/// LIFO.  So in this example, we can expect `s.1` to execute before
+/// `s.2`, and `t.1` before `t.2`. Other threads also steal tasks in
+/// FIFO order, as usual. Overall, this has roughly the same order as
+/// the now-deprecated [`breadth_first`] option, except the effect is
+/// isolated to a particular scope. If spawns are intermingled from any
+/// combination of `scope()` and `scope_fifo()`, or from different
+/// threads, their order is only specified with respect to spawns in the
+/// same scope and thread.
+///
+/// For more details on this design, see Rayon [RFC #1].
+///
+/// [`breadth_first`]: struct.ThreadPoolBuilder.html#method.breadth_first
+/// [RFC #1]: https://github.com/rayon-rs/rfcs/blob/master/accepted/rfc0001-scope-scheduling.md
+///
+/// # Panics
+///
+/// If a panic occurs, either in the closure given to `scope_fifo()` or
+/// in any of the spawned jobs, that panic will be propagated and the
+/// call to `scope_fifo()` will panic. If multiple panics occurs, it is
+/// non-deterministic which of their panic values will propagate.
+/// Regardless, once a task is spawned using `scope.spawn_fifo()`, it
+/// will execute, even if the spawning task should later panic.
+/// `scope_fifo()` returns once all spawned jobs have completed, and any
+/// panics are propagated at that point.
+pub fn scope_fifo<'scope, OP, R>(op: OP) -> R
+where
+    OP: FnOnce(&ScopeFifo<'scope>) -> R + Send,
+    R: Send,
+{
+    in_worker(|owner_thread, _| {
+        let scope = ScopeFifo::<'scope>::new(Some(owner_thread), None);
+        scope.base.complete(Some(owner_thread), || op(&scope))
+    })
+}
+
+/// Creates a "fork-join" scope `s` and invokes the closure with a
+/// reference to `s`. This closure can then spawn asynchronous tasks
+/// into `s`. Those tasks may run asynchronously with respect to the
+/// closure; they may themselves spawn additional tasks into `s`. When
+/// the closure returns, it will block until all tasks that have been
+/// spawned into `s` complete.
+///
+/// This is just like `scope()` except the closure runs on the same thread
+/// that calls `in_place_scope()`. Only work that it spawns runs in the
+/// thread pool.
+///
+/// # Panics
+///
+/// If a panic occurs, either in the closure given to `in_place_scope()` or in
+/// any of the spawned jobs, that panic will be propagated and the
+/// call to `in_place_scope()` will panic. If multiple panics occurs, it is
+/// non-deterministic which of their panic values will propagate.
+/// Regardless, once a task is spawned using `scope.spawn()`, it will
+/// execute, even if the spawning task should later panic. `in_place_scope()`
+/// returns once all spawned jobs have completed, and any panics are
+/// propagated at that point.
+pub fn in_place_scope<'scope, OP, R>(op: OP) -> R
+where
+    OP: FnOnce(&Scope<'scope>) -> R,
+{
+    do_in_place_scope(None, op)
+}
+
+pub(crate) fn do_in_place_scope<'scope, OP, R>(registry: Option<&Arc<Registry>>, op: OP) -> R
+where
+    OP: FnOnce(&Scope<'scope>) -> R,
+{
+    let thread = unsafe { WorkerThread::current().as_ref() };
+    let scope = Scope::<'scope>::new(thread, registry);
+    scope.base.complete(thread, || op(&scope))
+}
+
+/// Creates a "fork-join" scope `s` with FIFO order, and invokes the
+/// closure with a reference to `s`. This closure can then spawn
+/// asynchronous tasks into `s`. Those tasks may run asynchronously with
+/// respect to the closure; they may themselves spawn additional tasks
+/// into `s`. When the closure returns, it will block until all tasks
+/// that have been spawned into `s` complete.
+///
+/// This is just like `scope_fifo()` except the closure runs on the same thread
+/// that calls `in_place_scope_fifo()`. Only work that it spawns runs in the
+/// thread pool.
+///
+/// # Panics
+///
+/// If a panic occurs, either in the closure given to `in_place_scope_fifo()` or in
+/// any of the spawned jobs, that panic will be propagated and the
+/// call to `in_place_scope_fifo()` will panic. If multiple panics occurs, it is
+/// non-deterministic which of their panic values will propagate.
+/// Regardless, once a task is spawned using `scope.spawn_fifo()`, it will
+/// execute, even if the spawning task should later panic. `in_place_scope_fifo()`
+/// returns once all spawned jobs have completed, and any panics are
+/// propagated at that point.
+pub fn in_place_scope_fifo<'scope, OP, R>(op: OP) -> R
+where
+    OP: FnOnce(&ScopeFifo<'scope>) -> R,
+{
+    do_in_place_scope_fifo(None, op)
+}
+
+pub(crate) fn do_in_place_scope_fifo<'scope, OP, R>(registry: Option<&Arc<Registry>>, op: OP) -> R
+where
+    OP: FnOnce(&ScopeFifo<'scope>) -> R,
+{
+    let thread = unsafe { WorkerThread::current().as_ref() };
+    let scope = ScopeFifo::<'scope>::new(thread, registry);
+    scope.base.complete(thread, || op(&scope))
+}
+
impl<'scope> Scope<'scope> {
    /// Creates a scope anchored to `owner` (the worker thread that will wait
    /// on its completion latch, if any) and `registry` (falling back to the
    /// owner's registry or the global one inside `ScopeBase::new`).
    fn new(owner: Option<&WorkerThread>, registry: Option<&Arc<Registry>>) -> Self {
        let base = ScopeBase::new(owner, registry);
        Scope { base }
    }

    /// Spawns a job into the fork-join scope `self`. This job will
    /// execute sometime before the fork-join scope completes.  The
    /// job is specified as a closure, and this closure receives its
    /// own reference to the scope `self` as argument. This can be
    /// used to inject new jobs into `self`.
    ///
    /// # Returns
    ///
    /// Nothing. The spawned closures cannot pass back values to the
    /// caller directly, though they can write to local variables on
    /// the stack (if those variables outlive the scope) or
    /// communicate through shared channels.
    ///
    /// (The intention is to eventually integrate with Rust futures to
    /// support spawns of functions that compute a value.)
    ///
    /// # Examples
    ///
    /// ```rust
    /// # use rayon_core as rayon;
    /// let mut value_a = None;
    /// let mut value_b = None;
    /// let mut value_c = None;
    /// rayon::scope(|s| {
    ///     s.spawn(|s1| {
    ///           // ^ this is the same scope as `s`; this handle `s1`
    ///           //   is intended for use by the spawned task,
    ///           //   since scope handles cannot cross thread boundaries.
    ///
    ///         value_a = Some(22);
    ///
    ///         // the scope `s` will not end until all these tasks are done
    ///         s1.spawn(|_| {
    ///             value_b = Some(44);
    ///         });
    ///     });
    ///
    ///     s.spawn(|_| {
    ///         value_c = Some(66);
    ///     });
    /// });
    /// assert_eq!(value_a, Some(22));
    /// assert_eq!(value_b, Some(44));
    /// assert_eq!(value_c, Some(66));
    /// ```
    ///
    /// # See also
    ///
    /// The [`scope` function] has more extensive documentation about
    /// task spawning.
    ///
    /// [`scope` function]: fn.scope.html
    pub fn spawn<BODY>(&self, body: BODY)
    where
        BODY: FnOnce(&Scope<'scope>) + Send + 'scope,
    {
        // Smuggle `&self` into the heap job without inventing a lifetime;
        // see `ScopePtr` below for why this is sound.
        let scope_ptr = ScopePtr(self);
        let job = HeapJob::new(move || unsafe {
            // SAFETY: this job will execute before the scope ends.
            let scope = scope_ptr.as_ref();
            ScopeBase::execute_job(&scope.base, move || body(scope))
        });
        // `heap_job_ref` also increments the scope's pending-job count, so
        // `complete()` will not return until this job has run.
        let job_ref = self.base.heap_job_ref(job);

        // Since `Scope` implements `Sync`, we can't be sure that we're still in a
        // thread of this pool, so we can't just push to the local worker thread.
        // Also, this might be an in-place scope.
        self.base.registry.inject_or_push(job_ref);
    }

    /// Spawns a job into every thread of the fork-join scope `self`. This job will
    /// execute on each thread sometime before the fork-join scope completes.  The
    /// job is specified as a closure, and this closure receives its own reference
    /// to the scope `self` as argument, as well as a `BroadcastContext`.
    pub fn spawn_broadcast<BODY>(&self, body: BODY)
    where
        BODY: Fn(&Scope<'scope>, BroadcastContext<'_>) + Send + Sync + 'scope,
    {
        let scope_ptr = ScopePtr(self);
        // `ArcJob` (not `HeapJob`) because the same closure runs once on
        // every thread in the pool.
        let job = ArcJob::new(move || unsafe {
            // SAFETY: this job will execute before the scope ends.
            let scope = scope_ptr.as_ref();
            let body = &body;
            let func = move || BroadcastContext::with(move |ctx| body(scope, ctx));
            ScopeBase::execute_job(&scope.base, func)
        });
        self.base.inject_broadcast(job)
    }
}
+
impl<'scope> ScopeFifo<'scope> {
    /// Creates a FIFO scope, with one private `JobFifo` per pool thread so
    /// each worker can run its own spawns in first-in-first-out order.
    fn new(owner: Option<&WorkerThread>, registry: Option<&Arc<Registry>>) -> Self {
        let base = ScopeBase::new(owner, registry);
        let num_threads = base.registry.num_threads();
        let fifos = (0..num_threads).map(|_| JobFifo::new()).collect();
        ScopeFifo { base, fifos }
    }

    /// Spawns a job into the fork-join scope `self`. This job will
    /// execute sometime before the fork-join scope completes.  The
    /// job is specified as a closure, and this closure receives its
    /// own reference to the scope `self` as argument. This can be
    /// used to inject new jobs into `self`.
    ///
    /// # See also
    ///
    /// This method is akin to [`Scope::spawn()`], but with a FIFO
    /// priority.  The [`scope_fifo` function] has more details about
    /// this distinction.
    ///
    /// [`Scope::spawn()`]: struct.Scope.html#method.spawn
    /// [`scope_fifo` function]: fn.scope_fifo.html
    pub fn spawn_fifo<BODY>(&self, body: BODY)
    where
        BODY: FnOnce(&ScopeFifo<'scope>) + Send + 'scope,
    {
        // Smuggle `&self` into the heap job without inventing a lifetime;
        // see `ScopePtr` below for why this is sound.
        let scope_ptr = ScopePtr(self);
        let job = HeapJob::new(move || unsafe {
            // SAFETY: this job will execute before the scope ends.
            let scope = scope_ptr.as_ref();
            ScopeBase::execute_job(&scope.base, move || body(scope))
        });
        // `heap_job_ref` also increments the scope's pending-job count.
        let job_ref = self.base.heap_job_ref(job);

        // If we're in the pool, use our scope's private fifo for this thread to execute
        // in a locally-FIFO order. Otherwise, just use the pool's global injector.
        match self.base.registry.current_thread() {
            Some(worker) => {
                let fifo = &self.fifos[worker.index()];
                // SAFETY: this job will execute before the scope ends.
                unsafe { worker.push(fifo.push(job_ref)) };
            }
            None => self.base.registry.inject(job_ref),
        }
    }

    /// Spawns a job into every thread of the fork-join scope `self`. This job will
    /// execute on each thread sometime before the fork-join scope completes.  The
    /// job is specified as a closure, and this closure receives its own reference
    /// to the scope `self` as argument, as well as a `BroadcastContext`.
    pub fn spawn_broadcast<BODY>(&self, body: BODY)
    where
        BODY: Fn(&ScopeFifo<'scope>, BroadcastContext<'_>) + Send + Sync + 'scope,
    {
        let scope_ptr = ScopePtr(self);
        // `ArcJob` (not `HeapJob`) because the same closure runs once on
        // every thread in the pool.
        let job = ArcJob::new(move || unsafe {
            // SAFETY: this job will execute before the scope ends.
            let scope = scope_ptr.as_ref();
            let body = &body;
            let func = move || BroadcastContext::with(move |ctx| body(scope, ctx));
            ScopeBase::execute_job(&scope.base, func)
        });
        self.base.inject_broadcast(job)
    }
}
+
impl<'scope> ScopeBase<'scope> {
    /// Creates the base of a new scope for the given registry,
    /// defaulting to the owner's registry or else the global one.
    fn new(owner: Option<&WorkerThread>, registry: Option<&Arc<Registry>>) -> Self {
        let registry = registry.unwrap_or_else(|| match owner {
            Some(owner) => owner.registry(),
            None => global_registry(),
        });

        ScopeBase {
            registry: Arc::clone(registry),
            // Null until the first panicking job stores its payload here.
            panic: AtomicPtr::new(ptr::null_mut()),
            // Starts at count 1, representing the scope's main closure itself.
            job_completed_latch: ScopeLatch::new(owner),
            marker: PhantomData,
        }
    }

    /// Bumps the count of outstanding jobs; every spawned job must be
    /// matched by a later `Latch::set` in `execute_job_closure`.
    fn increment(&self) {
        self.job_completed_latch.increment();
    }

    /// Registers `job` with the completion latch and converts it into a
    /// raw `JobRef` for injection into the pool.
    fn heap_job_ref<FUNC>(&self, job: Box<HeapJob<FUNC>>) -> JobRef
    where
        FUNC: FnOnce() + Send + 'scope,
    {
        unsafe {
            self.increment();
            job.into_job_ref()
        }
    }

    /// Queues one `JobRef` per pool thread (all sharing the same `ArcJob`),
    /// incrementing the latch once for each copy.
    fn inject_broadcast<FUNC>(&self, job: Arc<ArcJob<FUNC>>)
    where
        FUNC: Fn() + Send + Sync + 'scope,
    {
        let n_threads = self.registry.num_threads();
        let job_refs = (0..n_threads).map(|_| unsafe {
            self.increment();
            ArcJob::as_job_ref(&job)
        });

        self.registry.inject_broadcast(job_refs);
    }

    /// Runs `func` as the scope's main task, waits until every spawned job
    /// has completed, then re-raises any panic captured along the way.
    fn complete<FUNC, R>(&self, owner: Option<&WorkerThread>, func: FUNC) -> R
    where
        FUNC: FnOnce() -> R,
    {
        let result = unsafe { Self::execute_job_closure(self, func) };
        self.job_completed_latch.wait(owner);
        self.maybe_propagate_panic();
        result.unwrap() // only None if `op` panicked, and that would have been propagated
    }

    /// Executes `func` as a job, discarding its (unit) result; panics are
    /// captured into the scope rather than unwinding further.
    unsafe fn execute_job<FUNC>(this: *const Self, func: FUNC)
    where
        FUNC: FnOnce(),
    {
        let _: Option<()> = Self::execute_job_closure(this, func);
    }

    /// Executes `func` as a job in scope. Adjusts the "job completed"
    /// counters and also catches any panic and stores it into
    /// `scope`. Returns `None` exactly when `func` panicked.
    unsafe fn execute_job_closure<FUNC, R>(this: *const Self, func: FUNC) -> Option<R>
    where
        FUNC: FnOnce() -> R,
    {
        match unwind::halt_unwinding(func) {
            Ok(r) => {
                Latch::set(&(*this).job_completed_latch);
                Some(r)
            }
            Err(err) => {
                // Record the panic first, then release the latch either way
                // so `complete()` can make progress.
                (*this).job_panicked(err);
                Latch::set(&(*this).job_completed_latch);
                None
            }
        }
    }

    /// Stores the first panic payload into `self.panic`; later panics from
    /// racing jobs are dropped.
    fn job_panicked(&self, err: Box<dyn Any + Send + 'static>) {
        // capture the first error we see, free the rest
        if self.panic.load(Ordering::Relaxed).is_null() {
            let nil = ptr::null_mut();
            let mut err = ManuallyDrop::new(Box::new(err)); // box up the fat ptr
            let err_ptr: *mut Box<dyn Any + Send + 'static> = &mut **err;
            if self
                .panic
                .compare_exchange(nil, err_ptr, Ordering::Release, Ordering::Relaxed)
                .is_ok()
            {
                // ownership now transferred into self.panic
            } else {
                // another panic raced in ahead of us, so drop ours
                let _: Box<Box<_>> = ManuallyDrop::into_inner(err);
            }
        }
    }

    /// Resumes unwinding with the stored panic payload, if any.
    fn maybe_propagate_panic(&self) {
        // propagate panic, if any occurred; at this point, all
        // outstanding jobs have completed, so we can use a relaxed
        // ordering:
        let panic = self.panic.swap(ptr::null_mut(), Ordering::Relaxed);
        if !panic.is_null() {
            // SAFETY: a non-null pointer here was produced by `job_panicked`
            // from a leaked `Box`, and ownership transfers back to us.
            let value = unsafe { Box::from_raw(panic) };
            unwind::resume_unwinding(*value);
        }
    }
}
+
impl ScopeLatch {
    /// Starts at count 1, representing the scope's main closure.
    fn new(owner: Option<&WorkerThread>) -> Self {
        Self::with_count(1, owner)
    }

    /// Picks the latch flavor from the owner: a pool worker can steal work
    /// while it waits (`Stealing`); a thread outside the pool must block
    /// on a lock-based latch (`Blocking`).
    pub(super) fn with_count(count: usize, owner: Option<&WorkerThread>) -> Self {
        match owner {
            Some(owner) => ScopeLatch::Stealing {
                latch: CountLatch::with_count(count),
                registry: Arc::clone(owner.registry()),
                worker_index: owner.index(),
            },
            None => ScopeLatch::Blocking {
                latch: CountLockLatch::with_count(count),
            },
        }
    }

    /// Adds one pending job to the count.
    fn increment(&self) {
        match self {
            ScopeLatch::Stealing { latch, .. } => latch.increment(),
            ScopeLatch::Blocking { latch } => latch.increment(),
        }
    }

    /// Waits for the count to drain to zero. For the `Stealing` flavor,
    /// `owner` must be the same worker thread that created the latch.
    pub(super) fn wait(&self, owner: Option<&WorkerThread>) {
        match self {
            ScopeLatch::Stealing {
                latch,
                registry,
                worker_index,
            } => unsafe {
                let owner = owner.expect("owner thread");
                debug_assert_eq!(registry.id(), owner.registry().id());
                debug_assert_eq!(*worker_index, owner.index());
                owner.wait_until(latch);
            },
            ScopeLatch::Blocking { latch } => latch.wait(),
        }
    }
}
+
impl Latch for ScopeLatch {
    // Decrements the count by one; the `Stealing` arm additionally wakes
    // the owning worker thread so it notices when the count reaches zero.
    unsafe fn set(this: *const Self) {
        match &*this {
            ScopeLatch::Stealing {
                latch,
                registry,
                worker_index,
            } => CountLatch::set_and_tickle_one(latch, registry, *worker_index),
            ScopeLatch::Blocking { latch } => Latch::set(latch),
        }
    }
}
+
+impl<'scope> fmt::Debug for Scope<'scope> {
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt.debug_struct("Scope")
+            .field("pool_id", &self.base.registry.id())
+            .field("panic", &self.base.panic)
+            .field("job_completed_latch", &self.base.job_completed_latch)
+            .finish()
+    }
+}
+
+impl<'scope> fmt::Debug for ScopeFifo<'scope> {
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt.debug_struct("ScopeFifo")
+            .field("num_fifos", &self.fifos.len())
+            .field("pool_id", &self.base.registry.id())
+            .field("panic", &self.base.panic)
+            .field("job_completed_latch", &self.base.job_completed_latch)
+            .finish()
+    }
+}
+
+impl fmt::Debug for ScopeLatch {
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            ScopeLatch::Stealing { latch, .. } => fmt
+                .debug_tuple("ScopeLatch::Stealing")
+                .field(latch)
+                .finish(),
+            ScopeLatch::Blocking { latch } => fmt
+                .debug_tuple("ScopeLatch::Blocking")
+                .field(latch)
+                .finish(),
+        }
+    }
+}
+
/// Used to capture a scope `&Self` pointer in jobs, without faking a lifetime.
///
/// Unsafe code is still required to dereference the pointer, but that's fine in
/// scope jobs that are guaranteed to execute before the scope ends.
struct ScopePtr<T>(*const T);

// SAFETY: !Send for raw pointers is not for safety, just as a lint
unsafe impl<T: Sync> Send for ScopePtr<T> {}

// SAFETY: !Sync for raw pointers is not for safety, just as a lint
unsafe impl<T: Sync> Sync for ScopePtr<T> {}

impl<T> ScopePtr<T> {
    // Helper to avoid disjoint captures of `scope_ptr.0`
    //
    // SAFETY (caller contract): the pointee must still be alive, which holds
    // for scope jobs because the scope blocks in `complete()` until they run.
    unsafe fn as_ref(&self) -> &T {
        &*self.0
    }
}
diff --git a/rayon/rayon-core/src/scope/test.rs b/rayon/rayon-core/src/scope/test.rs
new file mode 100644
index 0000000..ad8c4af
--- /dev/null
+++ b/rayon/rayon-core/src/scope/test.rs
@@ -0,0 +1,619 @@
+use crate::unwind;
+use crate::ThreadPoolBuilder;
+use crate::{scope, scope_fifo, Scope, ScopeFifo};
+use rand::{Rng, SeedableRng};
+use rand_xorshift::XorShiftRng;
+use std::cmp;
+use std::iter::once;
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::sync::{Barrier, Mutex};
+use std::vec;
+
#[test]
fn scope_empty() {
    // A scope with no spawns must still return normally.
    scope(|_| {});
}

#[test]
fn scope_result() {
    // The closure's return value is passed through `scope()`.
    let x = scope(|_| 22);
    assert_eq!(x, 22);
}

#[test]
fn scope_two() {
    // Both spawned jobs must run before `scope()` returns; distinct
    // increments (1 and 10) prove each ran exactly once.
    let counter = &AtomicUsize::new(0);
    scope(|s| {
        s.spawn(move |_| {
            counter.fetch_add(1, Ordering::SeqCst);
        });
        s.spawn(move |_| {
            counter.fetch_add(10, Ordering::SeqCst);
        });
    });
    assert_eq!(counter.load(Ordering::SeqCst), 11);
}
+
+#[test]
+fn scope_divide_and_conquer() {
+    let counter_p = &AtomicUsize::new(0);
+    scope(|s| s.spawn(move |s| divide_and_conquer(s, counter_p, 1024)));
+
+    let counter_s = &AtomicUsize::new(0);
+    divide_and_conquer_seq(counter_s, 1024);
+
+    let p = counter_p.load(Ordering::SeqCst);
+    let s = counter_s.load(Ordering::SeqCst);
+    assert_eq!(p, s);
+}
+
+fn divide_and_conquer<'scope>(scope: &Scope<'scope>, counter: &'scope AtomicUsize, size: usize) {
+    if size > 1 {
+        scope.spawn(move |scope| divide_and_conquer(scope, counter, size / 2));
+        scope.spawn(move |scope| divide_and_conquer(scope, counter, size / 2));
+    } else {
+        // count the leaves
+        counter.fetch_add(1, Ordering::SeqCst);
+    }
+}
+
+fn divide_and_conquer_seq(counter: &AtomicUsize, size: usize) {
+    if size > 1 {
+        divide_and_conquer_seq(counter, size / 2);
+        divide_and_conquer_seq(counter, size / 2);
+    } else {
+        // count the leaves
+        counter.fetch_add(1, Ordering::SeqCst);
+    }
+}
+
/// A simple n-ary tree used to exercise recursive scoped spawns.
struct Tree<T: Send> {
    value: T,
    children: Vec<Tree<T>>,
}

impl<T: Send> Tree<T> {
    /// Pre-order iterator over references to all values in the tree.
    fn iter(&self) -> vec::IntoIter<&T> {
        once(&self.value)
            .chain(self.children.iter().flat_map(Tree::iter))
            .collect::<Vec<_>>() // seems like it shouldn't be needed... but prevents overflow
            .into_iter()
    }

    /// Applies `op` to every value in the tree, in parallel.
    fn update<OP>(&mut self, op: OP)
    where
        OP: Fn(&mut T) + Sync,
        T: Send,
    {
        scope(|s| self.update_in_scope(&op, s));
    }

    /// Recursive worker for `update`: spawns one task per child subtree
    /// while updating this node's own value inline. The destructuring
    /// split of `self` lets `children` move into the spawned closure
    /// while `value` stays borrowed here.
    fn update_in_scope<'scope, OP>(&'scope mut self, op: &'scope OP, scope: &Scope<'scope>)
    where
        OP: Fn(&mut T) + Sync,
    {
        let Tree {
            ref mut value,
            ref mut children,
        } = *self;
        scope.spawn(move |scope| {
            for child in children {
                scope.spawn(move |scope| child.update_in_scope(op, scope));
            }
        });

        op(value);
    }
}
+
+fn random_tree(depth: usize) -> Tree<u32> {
+    assert!(depth > 0);
+    let mut seed = <XorShiftRng as SeedableRng>::Seed::default();
+    (0..).zip(seed.as_mut()).for_each(|(i, x)| *x = i);
+    let mut rng = XorShiftRng::from_seed(seed);
+    random_tree1(depth, &mut rng)
+}
+
+fn random_tree1(depth: usize, rng: &mut XorShiftRng) -> Tree<u32> {
+    let children = if depth == 0 {
+        vec![]
+    } else {
+        (0..rng.gen_range(0..4)) // somewhere between 0 and 3 children at each level
+            .map(|_| random_tree1(depth - 1, rng))
+            .collect()
+    };
+
+    Tree {
+        value: rng.gen_range(0..1_000_000),
+        children,
+    }
+}
+
#[test]
fn update_tree() {
    // Every node's value must be incremented exactly once by the parallel
    // `update`, and no nodes may be added or dropped.
    let mut tree: Tree<u32> = random_tree(10);
    let before: Vec<u32> = tree.iter().cloned().collect();
    tree.update(|v| *v += 1);
    let after: Vec<u32> = tree.iter().cloned().collect();
    assert_eq!(before.len(), after.len());
    for (&old, &new) in before.iter().zip(&after) {
        assert_eq!(old + 1, new);
    }
}
+
/// Check that if you have a chain of scoped tasks where T0 spawns T1
/// spawns T2 and so forth down to Tn, the stack space should not grow
/// linearly with N. We test this by some unsafe hackery and
/// permitting an approx 10% change with a 10x input change.
#[test]
#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn linear_stack_growth() {
    // Single thread so every spawned task runs on the same stack.
    let builder = ThreadPoolBuilder::new().num_threads(1);
    let pool = builder.build().unwrap();
    pool.install(|| {
        let mut max_diff = Mutex::new(0);
        let bottom_of_stack = 0;
        scope(|s| the_final_countdown(s, &bottom_of_stack, &max_diff, 5));
        let diff_when_5 = *max_diff.get_mut().unwrap() as f64;

        // NOTE(review): `max_diff` is intentionally not reset here, so the
        // second measurement is the max over both runs — which still works
        // for the ratio check since the 500-deep run dominates.
        scope(|s| the_final_countdown(s, &bottom_of_stack, &max_diff, 500));
        let diff_when_500 = *max_diff.get_mut().unwrap() as f64;

        let ratio = diff_when_5 / diff_when_500;
        assert!(
            ratio > 0.9 && ratio < 1.1,
            "stack usage ratio out of bounds: {}",
            ratio
        );
    });
}

/// Spawns a chain of `n` tasks, recording in `max` the largest distance
/// (in bytes) between a task-local variable and `bottom_of_stack`.
fn the_final_countdown<'scope>(
    s: &Scope<'scope>,
    bottom_of_stack: &'scope i32,
    max: &'scope Mutex<usize>,
    n: usize,
) {
    // Compare stack addresses via a fresh local; absolute-difference dance
    // because we don't know which way the stack grows.
    let top_of_stack = 0;
    let p = bottom_of_stack as *const i32 as usize;
    let q = &top_of_stack as *const i32 as usize;
    let diff = if p > q { p - q } else { q - p };

    let mut data = max.lock().unwrap();
    *data = cmp::max(diff, *data);

    if n > 0 {
        s.spawn(move |s| the_final_countdown(s, bottom_of_stack, max, n - 1));
    }
}
+
// The following four tests check that a panic raised anywhere inside a
// scope — directly in the closure, in a spawn, or in arbitrarily nested
// spawns/scopes — propagates out of the outermost `scope()` call.

#[test]
#[should_panic(expected = "Hello, world!")]
fn panic_propagate_scope() {
    scope(|_| panic!("Hello, world!"));
}

#[test]
#[should_panic(expected = "Hello, world!")]
fn panic_propagate_spawn() {
    scope(|s| s.spawn(|_| panic!("Hello, world!")));
}

#[test]
#[should_panic(expected = "Hello, world!")]
fn panic_propagate_nested_spawn() {
    scope(|s| s.spawn(|s| s.spawn(|s| s.spawn(|_| panic!("Hello, world!")))));
}

#[test]
#[should_panic(expected = "Hello, world!")]
fn panic_propagate_nested_scope_spawn() {
    scope(|s| s.spawn(|_| scope(|s| s.spawn(|_| panic!("Hello, world!")))));
}
+
// These tests verify that a panic in one part of a scope does not prevent
// sibling work from running: once spawned, a job always executes.

#[test]
#[cfg_attr(not(panic = "unwind"), ignore)]
fn panic_propagate_still_execute_1() {
    let mut x = false;
    let result = unwind::halt_unwinding(|| {
        scope(|s| {
            s.spawn(|_| panic!("Hello, world!")); // job A
            s.spawn(|_| x = true); // job B, should still execute even though A panics
        });
    });
    assert!(result.is_err(), "failed to propagate panic");
    assert!(x, "job b failed to execute");
}

#[test]
#[cfg_attr(not(panic = "unwind"), ignore)]
fn panic_propagate_still_execute_2() {
    let mut x = false;
    let result = unwind::halt_unwinding(|| {
        scope(|s| {
            s.spawn(|_| x = true); // job B, should still execute even though A panics
            s.spawn(|_| panic!("Hello, world!")); // job A
        });
    });
    assert!(result.is_err(), "failed to propagate panic");
    assert!(x, "job b failed to execute");
}

#[test]
#[cfg_attr(not(panic = "unwind"), ignore)]
fn panic_propagate_still_execute_3() {
    let mut x = false;
    let result = unwind::halt_unwinding(|| {
        scope(|s| {
            s.spawn(|_| x = true); // spawned job should still execute despite later panic
            panic!("Hello, world!");
        });
    });
    assert!(result.is_err(), "failed to propagate panic");
    assert!(x, "panic after spawn, spawn failed to execute");
}

#[test]
#[cfg_attr(not(panic = "unwind"), ignore)]
fn panic_propagate_still_execute_4() {
    let mut x = false;
    let result = unwind::halt_unwinding(|| {
        scope(|s| {
            s.spawn(|_| panic!("Hello, world!"));
            x = true;
        });
    });
    assert!(result.is_err(), "failed to propagate panic");
    assert!(x, "panic in spawn tainted scope");
}
+
// Runs 10 outer spawns, each enqueueing 10 inner spawns, on a one-thread
// pool (no stealing), and returns the order in which the 100 pushes ran.
macro_rules! test_order {
    ($scope:ident => $spawn:ident) => {{
        let builder = ThreadPoolBuilder::new().num_threads(1);
        let pool = builder.build().unwrap();
        pool.install(|| {
            let vec = Mutex::new(vec![]);
            $scope(|scope| {
                let vec = &vec;
                for i in 0..10 {
                    scope.$spawn(move |scope| {
                        for j in 0..10 {
                            scope.$spawn(move |_| {
                                vec.lock().unwrap().push(i * 10 + j);
                            });
                        }
                    });
                }
            });
            vec.into_inner().unwrap()
        })
    }};
}

#[test]
#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn lifo_order() {
    // In the absence of stealing, `scope()` runs its `spawn()` jobs in LIFO order.
    let vec = test_order!(scope => spawn);
    let expected: Vec<i32> = (0..100).rev().collect(); // LIFO -> reversed
    assert_eq!(vec, expected);
}

#[test]
#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn fifo_order() {
    // In the absence of stealing, `scope_fifo()` runs its `spawn_fifo()` jobs in FIFO order.
    let vec = test_order!(scope_fifo => spawn_fifo);
    let expected: Vec<i32> = (0..100).collect(); // FIFO -> natural order
    assert_eq!(vec, expected);
}
+
+macro_rules! test_nested_order {
+    ($outer_scope:ident => $outer_spawn:ident,
+     $inner_scope:ident => $inner_spawn:ident) => {{
+        let builder = ThreadPoolBuilder::new().num_threads(1);
+        let pool = builder.build().unwrap();
+        pool.install(|| {
+            let vec = Mutex::new(vec![]);
+            $outer_scope(|scope| {
+                let vec = &vec;
+                for i in 0..10 {
+                    scope.$outer_spawn(move |_| {
+                        $inner_scope(|scope| {
+                            for j in 0..10 {
+                                scope.$inner_spawn(move |_| {
+                                    vec.lock().unwrap().push(i * 10 + j);
+                                });
+                            }
+                        });
+                    });
+                }
+            });
+            vec.into_inner().unwrap()
+        })
+    }};
+}
+
+#[test]
+#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
+fn nested_lifo_order() {
+    // In the absence of stealing, `scope()` runs its `spawn()` jobs in LIFO order.
+    let vec = test_nested_order!(scope => spawn, scope => spawn);
+    let expected: Vec<i32> = (0..100).rev().collect(); // LIFO -> reversed
+    assert_eq!(vec, expected);
+}
+
#[test]
#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn nested_fifo_order() {
    // Nested `scope_fifo()`/`spawn_fifo()` are both FIFO, so with no
    // stealing the hundred jobs complete in ascending submission order.
    let actual = test_nested_order!(scope_fifo => spawn_fifo, scope_fifo => spawn_fifo);
    let expected: Vec<i32> = (0..100).collect();
    assert_eq!(actual, expected);
}
+
#[test]
#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn nested_lifo_fifo_order() {
    // LIFO outer scope, FIFO inner scopes: the groups of ten run in reverse,
    // but each group's members keep their ascending order.
    let actual = test_nested_order!(scope => spawn, scope_fifo => spawn_fifo);
    let mut expected: Vec<i32> = Vec::with_capacity(100);
    for i in (0..10).rev() {
        for j in 0..10 {
            expected.push(i * 10 + j);
        }
    }
    assert_eq!(actual, expected);
}
+
#[test]
#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn nested_fifo_lifo_order() {
    // FIFO outer scope, LIFO inner scopes: the groups of ten run in order,
    // but each group's members come out reversed.
    let actual = test_nested_order!(scope_fifo => spawn_fifo, scope => spawn);
    let mut expected: Vec<i32> = Vec::with_capacity(100);
    for i in 0..10 {
        for j in (0..10).rev() {
            expected.push(i * 10 + j);
        }
    }
    assert_eq!(actual, expected);
}
+
// Spawn a job on `$scope` via the method `$spawn` that appends `$i` to the
// shared `Mutex<Vec<_>>` bound to the identifier `$vec` at the call site.
// Used by the mixed-order tests below to record execution order.
macro_rules! spawn_push {
    ($scope:ident . $spawn:ident, $vec:ident, $i:expr) => {{
        $scope.$spawn(move |_| $vec.lock().unwrap().push($i));
    }};
}
+
/// Test spawns pushing a series of numbers, interleaved
/// such that negative values are using an inner scope.
macro_rules! test_mixed_order {
    ($outer_scope:ident => $outer_spawn:ident,
     $inner_scope:ident => $inner_spawn:ident) => {{
        // Single worker thread: with no stealing, execution order is fully
        // determined by the LIFO/FIFO queue discipline under test.
        let builder = ThreadPoolBuilder::new().num_threads(1);
        let pool = builder.build().unwrap();
        pool.install(|| {
            // Shared log of job identifiers, in execution order.
            let vec = Mutex::new(vec![]);
            $outer_scope(|outer_scope| {
                let vec = &vec;
                // Interleave spawns on the two scopes; note the inner scope
                // closes (and drains its jobs) before job 3 is even spawned.
                spawn_push!(outer_scope.$outer_spawn, vec, 0);
                $inner_scope(|inner_scope| {
                    spawn_push!(inner_scope.$inner_spawn, vec, -1);
                    spawn_push!(outer_scope.$outer_spawn, vec, 1);
                    spawn_push!(inner_scope.$inner_spawn, vec, -2);
                    spawn_push!(outer_scope.$outer_spawn, vec, 2);
                    spawn_push!(inner_scope.$inner_spawn, vec, -3);
                });
                spawn_push!(outer_scope.$outer_spawn, vec, 3);
            });
            vec.into_inner().unwrap()
        })
    }};
}
+
#[test]
#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn mixed_lifo_order() {
    // NB: closing the inner scope drains jobs before the outer scope has
    // finished spawning, so the sequence is not a perfect LIFO reversal.
    let actual = test_mixed_order!(scope => spawn, scope => spawn);
    assert_eq!(actual, [-3, 2, -2, 1, -1, 3, 0]);
}
+
#[test]
#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn mixed_fifo_order() {
    // Both scopes FIFO: jobs run in submission order, with the inner scope's
    // jobs finishing before its closing brace releases the outer ones.
    let actual = test_mixed_order!(scope_fifo => spawn_fifo, scope_fifo => spawn_fifo);
    assert_eq!(actual, [-1, 0, -2, 1, -3, 2, 3]);
}
+
#[test]
#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn mixed_lifo_fifo_order() {
    // NB: closing the inner scope drains jobs before the outer scope has
    // finished spawning, so the outer jobs are not perfectly LIFO.
    let actual = test_mixed_order!(scope => spawn, scope_fifo => spawn_fifo);
    assert_eq!(actual, [-1, 2, -2, 1, -3, 3, 0]);
}
+
#[test]
#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn mixed_fifo_lifo_order() {
    // FIFO outer scope with a LIFO inner scope.
    let actual = test_mixed_order!(scope_fifo => spawn_fifo, scope => spawn);
    assert_eq!(actual, [-3, 0, -2, 1, -1, 2, 3]);
}
+
#[test]
fn static_scope() {
    static COUNTER: AtomicUsize = AtomicUsize::new(0);

    let mut numbers = 0..100;
    let expected: usize = numbers.clone().sum();
    let local_iter = &mut numbers;

    COUNTER.store(0, Ordering::Relaxed);
    scope(|s: &Scope<'static>| {
        // The scope body itself may use the locally borrowed iterator,
        // but every closure it spawns must be `'static`.
        for n in local_iter {
            s.spawn(move |_| {
                COUNTER.fetch_add(n, Ordering::Relaxed);
            });
        }
    });

    assert_eq!(COUNTER.load(Ordering::Relaxed), expected);
}
+
#[test]
fn static_scope_fifo() {
    static COUNTER: AtomicUsize = AtomicUsize::new(0);

    let mut numbers = 0..100;
    let expected: usize = numbers.clone().sum();
    let local_iter = &mut numbers;

    COUNTER.store(0, Ordering::Relaxed);
    scope_fifo(|s: &ScopeFifo<'static>| {
        // The scope body itself may use the locally borrowed iterator,
        // but every closure it spawns must be `'static`.
        for n in local_iter {
            s.spawn_fifo(move |_| {
                COUNTER.fetch_add(n, Ordering::Relaxed);
            });
        }
    });

    assert_eq!(COUNTER.load(Ordering::Relaxed), expected);
}
+
#[test]
fn mixed_lifetime_scope() {
    // The scope is pinned to `'counter`: the closure may read the shorter
    // `'slice` borrow, but each spawn may only capture `'counter` data.
    fn increment<'slice, 'counter>(counters: &'slice [&'counter AtomicUsize]) {
        scope(move |s: &Scope<'counter>| {
            counters.iter().for_each(|&c| {
                s.spawn(move |_| {
                    c.fetch_add(1, Ordering::Relaxed);
                });
            });
        });
    }

    let counter = AtomicUsize::new(0);
    increment(&[&counter; 100]);
    assert_eq!(counter.into_inner(), 100);
}
+
#[test]
fn mixed_lifetime_scope_fifo() {
    // The scope is pinned to `'counter`: the closure may read the shorter
    // `'slice` borrow, but each spawn may only capture `'counter` data.
    fn increment<'slice, 'counter>(counters: &'slice [&'counter AtomicUsize]) {
        scope_fifo(move |s: &ScopeFifo<'counter>| {
            counters.iter().for_each(|&c| {
                s.spawn_fifo(move |_| {
                    c.fetch_add(1, Ordering::Relaxed);
                });
            });
        });
    }

    let counter = AtomicUsize::new(0);
    increment(&[&counter; 100]);
    assert_eq!(counter.into_inner(), 100);
}
+
#[test]
fn scope_spawn_broadcast() {
    let sum = AtomicUsize::new(0);
    let threads = scope(|s| {
        // The broadcast runs once on every worker; each records its index.
        s.spawn_broadcast(|_, ctx| {
            sum.fetch_add(ctx.index(), Ordering::Relaxed);
        });
        crate::current_num_threads()
    });
    // Indices 0..threads sum to threads * (threads - 1) / 2.
    assert_eq!(sum.into_inner(), threads * (threads - 1) / 2);
}
+
#[test]
fn scope_fifo_spawn_broadcast() {
    let sum = AtomicUsize::new(0);
    let threads = scope_fifo(|s| {
        // The broadcast runs once on every worker; each records its index.
        s.spawn_broadcast(|_, ctx| {
            sum.fetch_add(ctx.index(), Ordering::Relaxed);
        });
        crate::current_num_threads()
    });
    // Indices 0..threads sum to threads * (threads - 1) / 2.
    assert_eq!(sum.into_inner(), threads * (threads - 1) / 2);
}
+
#[test]
fn scope_spawn_broadcast_nested() {
    let sum = AtomicUsize::new(0);
    let threads = scope(|s| {
        // Each of the `threads` outer broadcasts triggers a full inner
        // broadcast, so every index is added `threads` times over.
        s.spawn_broadcast(|s, _| {
            s.spawn_broadcast(|_, ctx| {
                sum.fetch_add(ctx.index(), Ordering::Relaxed);
            });
        });
        crate::current_num_threads()
    });
    assert_eq!(sum.into_inner(), threads * threads * (threads - 1) / 2);
}
+
#[test]
#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn scope_spawn_broadcast_barrier() {
    // Seven pool workers plus the calling thread must all meet at the
    // barrier, which only happens if the broadcast reaches every worker.
    let pool = ThreadPoolBuilder::new().num_threads(7).build().unwrap();
    let barrier = Barrier::new(8);
    pool.in_place_scope(|s| {
        s.spawn_broadcast(|_, _| {
            barrier.wait();
        });
        barrier.wait();
    });
}
+
+#[test]
+#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
+fn scope_spawn_broadcast_panic_one() {
+    let count = AtomicUsize::new(0);
+    let pool = ThreadPoolBuilder::new().num_threads(7).build().unwrap();
+    let result = crate::unwind::halt_unwinding(|| {
+        pool.scope(|s| {
+            s.spawn_broadcast(|_, ctx| {
+                count.fetch_add(1, Ordering::Relaxed);
... 229409 lines suppressed ...


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@teaclave.apache.org
For additional commands, e-mail: commits-help@teaclave.apache.org