Skip to content

Commit

Permalink
Re-implement proc-macro feature decoupling.
Browse files Browse the repository at this point in the history
  • Loading branch information
ehuss committed Mar 22, 2020
1 parent 7dec94d commit 944f504
Show file tree
Hide file tree
Showing 21 changed files with 190 additions and 256 deletions.
2 changes: 1 addition & 1 deletion Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ path = "src/cargo/lib.rs"
atty = "0.2"
bytesize = "1.0"
cargo-platform = { path = "crates/cargo-platform", version = "0.1.1" }
crates-io = { path = "crates/crates-io", version = "0.32" }
crates-io = { path = "crates/crates-io", version = "0.31" }
crossbeam-utils = "0.7"
crypto-hash = "0.3.1"
curl = { version = "0.4.23", features = ["http2"] }
Expand Down
1 change: 0 additions & 1 deletion crates/cargo-test-support/src/registry.rs
Original file line number Diff line number Diff line change
Expand Up @@ -420,7 +420,6 @@ impl Package {
"cksum": cksum,
"features": self.features,
"yanked": self.yanked,
"pm": self.proc_macro,
})
.to_string();

Expand Down
2 changes: 1 addition & 1 deletion crates/crates-io/Cargo.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[package]
name = "crates-io"
version = "0.32.0"
version = "0.31.0"
edition = "2018"
authors = ["Alex Crichton <alex@alexcrichton.com>"]
license = "MIT OR Apache-2.0"
Expand Down
1 change: 0 additions & 1 deletion crates/crates-io/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,6 @@ pub struct NewCrate {
pub repository: Option<String>,
pub badges: BTreeMap<String, BTreeMap<String, String>>,
pub links: Option<String>,
pub proc_macro: bool,
}

#[derive(Serialize)]
Expand Down
4 changes: 0 additions & 4 deletions crates/resolver-tests/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -176,7 +176,6 @@ pub fn resolve_with_config_raw(
&BTreeMap::<String, Vec<String>>::new(),
None::<&String>,
false,
false,
)
.unwrap();
let opts = ResolveOpts::everything();
Expand Down Expand Up @@ -578,7 +577,6 @@ pub fn pkg_dep<T: ToPkgId>(name: T, dep: Vec<Dependency>) -> Summary {
&BTreeMap::<String, Vec<String>>::new(),
link,
false,
false,
)
.unwrap()
}
Expand Down Expand Up @@ -607,7 +605,6 @@ pub fn pkg_loc(name: &str, loc: &str) -> Summary {
&BTreeMap::<String, Vec<String>>::new(),
link,
false,
false,
)
.unwrap()
}
Expand All @@ -622,7 +619,6 @@ pub fn remove_dep(sum: &Summary, ind: usize) -> Summary {
&BTreeMap::<String, Vec<String>>::new(),
sum.links().map(|a| a.as_str()),
sum.namespaced_features(),
sum.proc_macro(),
)
.unwrap()
}
Expand Down
121 changes: 31 additions & 90 deletions src/cargo/core/compiler/unit_dependencies.rs
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,6 @@ use crate::core::compiler::unit_graph::{UnitDep, UnitGraph};
use crate::core::compiler::Unit;
use crate::core::compiler::{BuildContext, CompileKind, CompileMode};
use crate::core::dependency::DepKind;
use crate::core::package::Downloads;
use crate::core::profiles::{Profile, UnitFor};
use crate::core::resolver::features::{FeaturesFor, ResolvedFeatures};
use crate::core::resolver::Resolve;
Expand All @@ -31,10 +30,7 @@ use std::collections::{HashMap, HashSet};
/// Collection of stuff used while creating the `UnitGraph`.
struct State<'a, 'cfg> {
bcx: &'a BuildContext<'a, 'cfg>,
waiting_on_download: HashSet<PackageId>,
downloads: Downloads<'a, 'cfg>,
unit_dependencies: UnitGraph<'a>,
package_cache: HashMap<PackageId, &'a Package>,
usr_resolve: &'a Resolve,
usr_features: &'a ResolvedFeatures,
std_resolve: Option<&'a Resolve>,
Expand All @@ -58,10 +54,7 @@ pub fn build_unit_dependencies<'a, 'cfg>(
};
let mut state = State {
bcx,
downloads: bcx.packages.enable_download()?,
waiting_on_download: HashSet::new(),
unit_dependencies: HashMap::new(),
package_cache: HashMap::new(),
usr_resolve: resolve,
usr_features: features,
std_resolve,
Expand Down Expand Up @@ -141,44 +134,32 @@ fn attach_std_deps<'a, 'cfg>(
/// Compute all the dependencies of the given root units.
/// The result is stored in state.unit_dependencies.
fn deps_of_roots<'a, 'cfg>(roots: &[Unit<'a>], mut state: &mut State<'a, 'cfg>) -> CargoResult<()> {
// Loop because we are downloading while building the dependency graph.
// The partially-built unit graph is discarded through each pass of the
// loop because it is incomplete because not all required Packages have
// been downloaded.
loop {
for unit in roots.iter() {
state.get(unit.pkg.package_id())?;

// Dependencies of tests/benches should not have `panic` set.
// We check the global test mode to see if we are running in `cargo
// test` in which case we ensure all dependencies have `panic`
// cleared, and avoid building the lib thrice (once with `panic`, once
// without, once for `--test`). In particular, the lib included for
// Doc tests and examples are `Build` mode here.
let unit_for = if unit.mode.is_any_test() || state.bcx.build_config.test() {
UnitFor::new_test(state.bcx.config)
} else if unit.target.is_custom_build() {
// This normally doesn't happen, except `clean` aggressively
// generates all units.
UnitFor::new_host(false)
} else if unit.target.proc_macro() {
UnitFor::new_host(true)
} else if unit.target.for_host() {
// Plugin should never have panic set.
UnitFor::new_compiler()
} else {
UnitFor::new_normal()
};
deps_of(unit, &mut state, unit_for)?;
}

if !state.waiting_on_download.is_empty() {
state.finish_some_downloads()?;
state.unit_dependencies.clear();
for unit in roots.iter() {
state.get(unit.pkg.package_id());

// Dependencies of tests/benches should not have `panic` set.
// We check the global test mode to see if we are running in `cargo
// test` in which case we ensure all dependencies have `panic`
// cleared, and avoid building the lib thrice (once with `panic`, once
// without, once for `--test`). In particular, the lib included for
// Doc tests and examples are `Build` mode here.
let unit_for = if unit.mode.is_any_test() || state.bcx.build_config.test() {
UnitFor::new_test(state.bcx.config)
} else if unit.target.is_custom_build() {
// This normally doesn't happen, except `clean` aggressively
// generates all units.
UnitFor::new_host(false)
} else if unit.target.proc_macro() {
UnitFor::new_host(true)
} else if unit.target.for_host() {
// Plugin should never have panic set.
UnitFor::new_compiler()
} else {
break;
}
UnitFor::new_normal()
};
deps_of(unit, &mut state, unit_for)?;
}

Ok(())
}

Expand Down Expand Up @@ -269,10 +250,7 @@ fn compute_deps<'a, 'cfg>(

let mut ret = Vec::new();
for (id, _) in filtered_deps {
let pkg = match state.get(id)? {
Some(pkg) => pkg,
None => continue,
};
let pkg = state.get(id);
let lib = match pkg.targets().iter().find(|t| t.is_lib()) {
Some(t) => t,
None => continue,
Expand Down Expand Up @@ -419,10 +397,7 @@ fn compute_deps_doc<'a, 'cfg>(
// the documentation of the library being built.
let mut ret = Vec::new();
for (id, _deps) in deps {
let dep = match state.get(id)? {
Some(dep) => dep,
None => continue,
};
let dep = state.get(id);
let lib = match dep.targets().iter().find(|t| t.is_lib()) {
Some(lib) => lib,
None => continue,
Expand Down Expand Up @@ -730,44 +705,10 @@ impl<'a, 'cfg> State<'a, 'cfg> {
features.activated_features(pkg_id, features_for)
}

fn get(&mut self, id: PackageId) -> CargoResult<Option<&'a Package>> {
if let Some(pkg) = self.package_cache.get(&id) {
return Ok(Some(pkg));
}
if !self.waiting_on_download.insert(id) {
return Ok(None);
}
if let Some(pkg) = self.downloads.start(id)? {
self.package_cache.insert(id, pkg);
self.waiting_on_download.remove(&id);
return Ok(Some(pkg));
}
Ok(None)
}

/// Completes at least one downloading, maybe waiting for more to complete.
///
/// This function will block the current thread waiting for at least one
/// crate to finish downloading. The function may continue to download more
/// crates if it looks like there's a long enough queue of crates to keep
/// downloading. When only a handful of packages remain this function
/// returns, and it's hoped that by returning we'll be able to push more
/// packages to download into the queue.
fn finish_some_downloads(&mut self) -> CargoResult<()> {
assert!(self.downloads.remaining() > 0);
loop {
let pkg = self.downloads.wait()?;
self.waiting_on_download.remove(&pkg.package_id());
self.package_cache.insert(pkg.package_id(), pkg);

// Arbitrarily choose that 5 or more packages concurrently download
// is a good enough number to "fill the network pipe". If we have
// less than this let's recompute the whole unit dependency graph
// again and try to find some more packages to download.
if self.downloads.remaining() < 5 {
break;
}
}
Ok(())
/// Returns the `Package` for `id`, panicking if it has not already been
/// downloaded (the panic message states this expectation explicitly).
fn get(&self, id: PackageId) -> &'a Package {
    match self.bcx.packages.get_one(id) {
        Ok(pkg) => pkg,
        Err(_) => panic!("expected {} to be downloaded", id),
    }
}
}
81 changes: 80 additions & 1 deletion src/cargo/core/package.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
use std::cell::{Cell, Ref, RefCell, RefMut};
use std::cmp::Ordering;
use std::collections::{HashMap, HashSet};
use std::collections::{BTreeSet, HashMap, HashSet};
use std::fmt;
use std::hash;
use std::mem;
Expand All @@ -17,7 +17,10 @@ use semver::Version;
use serde::ser;
use serde::Serialize;

use crate::core::compiler::{CompileKind, RustcTargetData};
use crate::core::dependency::DepKind;
use crate::core::interning::InternedString;
use crate::core::resolver::{HasDevUnits, Resolve};
use crate::core::source::MaybePackage;
use crate::core::{Dependency, Manifest, PackageId, SourceId, Target};
use crate::core::{FeatureMap, SourceMap, Summary};
Expand Down Expand Up @@ -189,6 +192,10 @@ impl Package {
pub fn publish(&self) -> &Option<Vec<String>> {
self.manifest.publish()
}
/// Returns `true` if any target of this package is a proc-macro.
pub fn proc_macro(&self) -> bool {
    for target in self.targets() {
        if target.proc_macro() {
            return true;
        }
    }
    false
}

/// Returns `true` if the package uses a custom build script for any target.
pub fn has_custom_build(&self) -> bool {
Expand Down Expand Up @@ -432,6 +439,9 @@ impl<'cfg> PackageSet<'cfg> {
}

/// Returns the `Package` for `id`, delegating to `get_many` when it is
/// not already present in the in-memory cache.
pub fn get_one(&self, id: PackageId) -> CargoResult<&Package> {
    // Fast path: the package may already be cached in `self.packages`.
    match self.packages.get(&id).and_then(|slot| slot.borrow()) {
        Some(pkg) => Ok(pkg),
        None => Ok(self.get_many(Some(id))?.remove(0)),
    }
}

Expand All @@ -448,6 +458,75 @@ impl<'cfg> PackageSet<'cfg> {
Ok(pkgs)
}

/// Downloads any packages accessible from the given root ids.
///
/// Walks the resolve graph transitively from `root_ids`, collecting every
/// package id whose dependency edges pass the dev-unit and platform
/// filters, then downloads them all in one `get_many` call.
pub fn download_accessible(
    &self,
    resolve: &Resolve,
    root_ids: &[PackageId],
    has_dev_units: HasDevUnits,
    requested_kind: CompileKind,
    target_data: &RustcTargetData,
) -> CargoResult<()> {
    // Recursively inserts `pkg_id` and every reachable (filtered)
    // dependency into `used`.
    fn collect_used_deps(
        used: &mut BTreeSet<PackageId>,
        resolve: &Resolve,
        pkg_id: PackageId,
        has_dev_units: HasDevUnits,
        requested_kind: CompileKind,
        target_data: &RustcTargetData,
    ) -> CargoResult<()> {
        // Already visited — also terminates recursion on dependency cycles.
        if !used.insert(pkg_id) {
            return Ok(());
        }
        // Keep a dependency edge if at least one of its `Dependency`
        // declarations survives both filters below.
        let filtered_deps = resolve.deps(pkg_id).filter(|&(_id, deps)| {
            deps.iter().any(|dep| {
                // Skip dev-dependencies when dev units are not being built.
                if dep.kind() == DepKind::Development && has_dev_units == HasDevUnits::No {
                    return false;
                }
                // This is overly broad, since not all target-specific
                // dependencies are used both for target and host. To tighten this
                // up, this function would need to track "for_host" similar to how
                // unit dependencies handles it.
                if !target_data.dep_platform_activated(dep, requested_kind)
                    && !target_data.dep_platform_activated(dep, CompileKind::Host)
                {
                    return false;
                }
                true
            })
        });
        for (dep_id, _deps) in filtered_deps {
            collect_used_deps(
                used,
                resolve,
                dep_id,
                has_dev_units,
                requested_kind,
                target_data,
            )?;
        }
        Ok(())
    }

    // This is sorted by PackageId to get consistent behavior and error
    // messages for Cargo's testsuite. Perhaps there is a better ordering
    // that optimizes download time?
    let mut to_download = BTreeSet::new();

    for id in root_ids {
        collect_used_deps(
            &mut to_download,
            resolve,
            *id,
            has_dev_units,
            requested_kind,
            target_data,
        )?;
    }
    self.get_many(to_download.into_iter())?;
    Ok(())
}

/// Returns a shared borrow of the `SourceMap` backing this package set.
pub fn sources(&self) -> Ref<'_, SourceMap<'cfg>> {
    self.sources.borrow()
}
Expand Down
Loading

0 comments on commit 944f504

Please sign in to comment.