From b1149129ab9dc0690b1e7a14cdc81e90229001be Mon Sep 17 00:00:00 2001
From: Tim de Jager
Date: Tue, 3 Sep 2024 16:39:41 +0200
Subject: [PATCH] fix: use two in memory indexes, for resolve and builds (#1969)

This PR makes the resolver use two in-memory indexes: one for the resolution
process and one for the potential building of wheels. Even though uv uses
different internal resolvers, it was sometimes still getting data that was
overridden by us during wheel builds, which was unexpected. This was cropping
up in issues like #1686.

We've made a minimal version of the lock file where this problem was occurring
and added that as a test.

---------

Co-authored-by: Hofer-Julian <30049909+Hofer-Julian@users.noreply.github.com>
---
 src/lock_file/resolve/pypi.rs              | 15 ++++++++++++---
 src/lock_file/resolve/resolver_provider.rs | 14 +++++++++-----
 tests/install_tests.rs                     | 24 ++++++++++++++++++++++++
 3 files changed, 45 insertions(+), 8 deletions(-)

diff --git a/src/lock_file/resolve/pypi.rs b/src/lock_file/resolve/pypi.rs
index 6f76eedaf..1adfb9383 100644
--- a/src/lock_file/resolve/pypi.rs
+++ b/src/lock_file/resolve/pypi.rs
@@ -285,7 +285,13 @@ pub async fn resolve_pypi(
     };
 
     // Create a shared in-memory index.
-    let in_memory_index = InMemoryIndex::default();
+    // We need two in-memory indexes, one for the build dispatch and one for the resolver,
+    // because we manually override requests for the resolver,
+    // but we don't want to override requests for the build dispatch.
+    //
+    // The BuildDispatch might resolve or install when building wheels, which will mostly
+    // happen with build isolation. In that case we want to use fresh, non-tampered requests.
+    let build_dispatch_in_memory_index = InMemoryIndex::default();
     let config_settings = ConfigSettings::default();
 
     let env = PythonEnvironment::from_interpreter(interpreter.clone());
@@ -303,7 +309,7 @@ pub async fn resolve_pypi(
         &interpreter,
         &index_locations,
         &flat_index,
-        &in_memory_index,
+        &build_dispatch_in_memory_index,
         &git_resolver,
         &context.in_flight,
         IndexStrategy::default(),
@@ -406,6 +412,9 @@ pub async fn resolve_pypi(
         package_requests: package_requests.clone(),
     };
 
+    // We need a new in-memory index for the resolver so that it does not conflict with the
+    // build dispatch one, as noted in the comment above.
+    let resolver_in_memory_index = InMemoryIndex::default();
     let python_version = PythonVersion::from_str(&interpreter_version.to_string())
         .expect("could not get version from interpreter");
     let resolution = Resolver::new_custom_io(
@@ -414,7 +423,7 @@ pub async fn resolve_pypi(
         &context.hash_strategy,
         ResolverMarkers::SpecificEnvironment(marker_environment.into()),
         &PythonRequirement::from_python_version(&interpreter, &python_version),
-        &in_memory_index,
+        &resolver_in_memory_index,
         &git_resolver,
         provider,
         EmptyInstalledPackages,
diff --git a/src/lock_file/resolve/resolver_provider.rs b/src/lock_file/resolve/resolver_provider.rs
index 19100870f..8dcdc816f 100644
--- a/src/lock_file/resolve/resolver_provider.rs
+++ b/src/lock_file/resolve/resolver_provider.rs
@@ -39,6 +39,13 @@ impl<'a, Context: BuildContext> ResolverProvider for CondaResolverProvider<'a, C
     ) -> impl Future + 'io {
         if let Some((repodata_record, identifier)) =
             self.conda_python_identifiers.get(package_name) {
+            let version = repodata_record.version().to_string();
+
+            tracing::debug!(
+                "overriding PyPI package version request {}=={}",
+                package_name,
+                version
+            );
             // If we encounter a package that was installed by conda we simply return a single
             // available version in the form of a source distribution with the URL of the
             // conda package.
@@ -60,11 +67,7 @@ impl<'a, Context: BuildContext> ResolverProvider for CondaResolverProvider<'a, C
 
             let source_dist = RegistrySourceDist {
                 name: identifier.name.as_normalized().clone(),
-                version: repodata_record
-                    .version()
-                    .to_string()
-                    .parse()
-                    .expect("could not convert to pypi version"),
+                version: version.parse().expect("could not convert to pypi version"),
                 file: Box::new(file),
                 index: IndexUrl::Pypi(VerbatimUrl::from_url(
                     consts::DEFAULT_PYPI_INDEX_URL.clone(),
@@ -104,6 +107,7 @@ impl<'a, Context: BuildContext> ResolverProvider for CondaResolverProvider<'a, C
     ) -> impl Future + 'io {
         if let Dist::Source(SourceDist::Registry(RegistrySourceDist { name, .. })) = dist {
             if let Some((_, iden)) = self.conda_python_identifiers.get(name) {
+                tracing::debug!("overriding PyPI package metadata request {}", name);
                 // If this is a Source dist and the package is actually installed by conda we
                 // create fake metadata with no dependencies. We assume that all conda installed
                 // packages are properly installed including its dependencies.
diff --git a/tests/install_tests.rs b/tests/install_tests.rs
index ede9b7204..8396f2ab0 100644
--- a/tests/install_tests.rs
+++ b/tests/install_tests.rs
@@ -650,6 +650,30 @@ setup(
     pixi.install().await.expect("cannot install project");
 }
 
+#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
+#[cfg_attr(not(feature = "slow_integration_tests"), ignore)]
+async fn test_setuptools_override_failure() {
+    // This was causing issues like: https://github.com/prefix-dev/pixi/issues/1686
+    let manifest = format!(
+        r#"
+        [project]
+        channels = ["conda-forge"]
+        name = "pixi-source-problem"
+        platforms = ["{platform}"]
+
+        [dependencies]
+        pip = ">=24.0,<25"
+
+        # The transitive dependencies of viser were causing issues
+        [pypi-dependencies]
+        viser = "==0.2.7"
+        "#,
+        platform = Platform::current()
+    );
+    let pixi = PixiControl::from_manifest(&manifest).expect("cannot instantiate pixi project");
+    pixi.install().await.expect("cannot install project");
+}
+
 #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
 #[cfg_attr(not(feature = "slow_integration_tests"), ignore)]
 async fn test_many_linux_wheel_tag() {