From d5c64de6d554e70ed2fe660b756ec10df07a2ff1 Mon Sep 17 00:00:00 2001
From: Daniel Holth
Date: Mon, 13 Nov 2023 14:27:16 -0500
Subject: [PATCH 1/2] call two-file api.Repo()

---
 conda_libmamba_solver/index.py | 34 +++++++++++++++++++++++++---------
 1 file changed, 25 insertions(+), 9 deletions(-)

diff --git a/conda_libmamba_solver/index.py b/conda_libmamba_solver/index.py
index 7d05c3d8..d6f72a1e 100644
--- a/conda_libmamba_solver/index.py
+++ b/conda_libmamba_solver/index.py
@@ -70,6 +70,8 @@
 We maintain a map of subdir-specific URLs to `conda.model.channel.Channel`
 and `libmamba.Repo` objects.
 """
+from __future__ import annotations
+
 import logging
 import os
 from dataclasses import dataclass
@@ -156,8 +158,8 @@ def reload_local_channels(self):
         """
         for url, info in self._index.items():
             if url.startswith("file://"):
-                url, json_path = self._fetch_channel(url)
-                new = self._json_path_to_repo_info(url, json_path)
+                url, json_path, overlay_path = self._fetch_channel(url)
+                new = self._json_path_to_repo_info(url, json_path, overlay_path)
                 self._repos[self._repos.index(info.repo)] = new.repo
                 self._index[url] = new
         set_channel_priorities(self._index)
@@ -215,7 +217,7 @@ def _repo_from_records(
         finally:
             os.unlink(f.name)
 
-    def _fetch_channel(self, url: str) -> Tuple[str, os.PathLike]:
+    def _fetch_channel(self, url: str) -> Tuple[str, Path, Path | None]:
         channel = Channel.from_url(url)
         if not channel.subdir:
             raise ValueError(f"Channel URLs must specify a subdir! Provided: {url}")
@@ -228,13 +230,23 @@ def _fetch_channel(self, url: str) -> Tuple[str, os.PathLike]:
             del SubdirData._cache_[(url, self._repodata_fn)]
         # /Workaround
 
-        log.debug("Fetching %s with SubdirData.repo_fetch", channel)
-        subdir_data = SubdirData(channel, repodata_fn=self._repodata_fn)
-        json_path, _ = subdir_data.repo_fetch.fetch_latest_path()
+        # repo_fetch is created on each property access
+        repo_fetch = SubdirData(channel, repodata_fn=self._repodata_fn).repo_fetch
+        overlay_path = None
+        if hasattr(repo_fetch, "fetch_latest_path_and_overlay"):
+            log.debug(
+                "Fetching %s with SubdirData.repo_fetch.fetch_latest_path_and_overlay", channel
+            )
+            json_path, overlay_path, _ = repo_fetch.fetch_latest_path_and_overlay()
+        else:
+            log.debug("Fetching %s with SubdirData.repo_fetch", channel)
+            json_path, _ = repo_fetch.fetch_latest_path()
 
-        return url, json_path
+        return url, json_path, overlay_path
 
-    def _json_path_to_repo_info(self, url: str, json_path: str) -> Optional[_ChannelRepoInfo]:
+    def _json_path_to_repo_info(
+        self, url: str, json_path: str | Path, overlay_path: Path | None
+    ) -> Optional[_ChannelRepoInfo]:
         channel = Channel.from_url(url)
         noauth_url = channel.urls(with_credentials=False, subdirs=(channel.subdir,))[0]
         json_path = Path(json_path)
@@ -263,7 +275,11 @@ def _json_path_to_repo_info(self, url: str, json_path: str) -> Optional[_Channel
         else:
             path_to_use = json_path
 
-        repo = api.Repo(self._pool, noauth_url, str(path_to_use), escape_channel_url(noauth_url))
+        if(overlay_path):
+            # from https://github.com/mamba-org/mamba/pull/2969
+            repo = api.Repo(self._pool, noauth_url, str(path_to_use), str(overlay_path), escape_channel_url(noauth_url))
+        else:
+            repo = api.Repo(self._pool, noauth_url, str(path_to_use), escape_channel_url(noauth_url))
         return _ChannelRepoInfo(
             repo=repo,
             channel=channel,

From 0cf6b34d4a94454bacf9cc9951182889e3e96e83 Mon Sep 17 00:00:00 2001
From: Daniel Holth
Date: Mon, 13 Nov 2023 17:10:05 -0500
Subject: [PATCH 2/2] accept (Path, Path) in additional place; format

---
 conda_libmamba_solver/index.py | 47 ++++++++++++++++++++--------------
 1 file changed, 28 insertions(+), 19 deletions(-)

diff --git a/conda_libmamba_solver/index.py b/conda_libmamba_solver/index.py
index d6f72a1e..c42d0ba6 100644
--- a/conda_libmamba_solver/index.py
+++ b/conda_libmamba_solver/index.py
@@ -111,7 +111,7 @@ class LibMambaIndexHelper(IndexHelper):
     def __init__(
         self,
         installed_records: Iterable[PackageRecord] = (),
-        channels: Iterable[Union[Channel, str]] = None,
+        channels: Iterable[Channel | str] = None,
         subdirs: Iterable[str] = None,
         repodata_fn: str = REPODATA_FN,
         query_format=api.QueryFormat.JSON,
@@ -217,7 +217,7 @@ def _repo_from_records(
         finally:
             os.unlink(f.name)
 
-    def _fetch_channel(self, url: str) -> Tuple[str, Path, Path | None]:
+    def _fetch_channel(self, url: str) -> tuple[str, Path, Path | None]:
         channel = Channel.from_url(url)
         if not channel.subdir:
             raise ValueError(f"Channel URLs must specify a subdir! Provided: {url}")
@@ -245,8 +245,8 @@ def _fetch_channel(self, url: str) -> Tuple[str, Path, Path | None]:
         return url, json_path, overlay_path
 
     def _json_path_to_repo_info(
-        self, url: str, json_path: str | Path, overlay_path: Path | None
-    ) -> Optional[_ChannelRepoInfo]:
+        self, url: str, json_path: str | Path, overlay_path: Path | None = None
+    ) -> _ChannelRepoInfo | None:
         channel = Channel.from_url(url)
         noauth_url = channel.urls(with_credentials=False, subdirs=(channel.subdir,))[0]
         json_path = Path(json_path)
@@ -275,11 +275,19 @@ def _json_path_to_repo_info(
         else:
             path_to_use = json_path
 
-        if(overlay_path):
+        if overlay_path:
             # from https://github.com/mamba-org/mamba/pull/2969
-            repo = api.Repo(self._pool, noauth_url, str(path_to_use), str(overlay_path), escape_channel_url(noauth_url))
+            repo = api.Repo(
+                self._pool,
+                noauth_url,
+                str(path_to_use),
+                str(overlay_path),
+                escape_channel_url(noauth_url),
+            )
         else:
-            repo = api.Repo(self._pool, noauth_url, str(path_to_use), escape_channel_url(noauth_url))
+            repo = api.Repo(
+                self._pool, noauth_url, str(path_to_use), escape_channel_url(noauth_url)
+            )
         return _ChannelRepoInfo(
             repo=repo,
             channel=channel,
@@ -287,7 +295,7 @@ def _json_path_to_repo_info(
             noauth_url=noauth_url,
         )
 
-    def _load_channels(self) -> Dict[str, _ChannelRepoInfo]:
+    def _load_channels(self) -> dict[str, _ChannelRepoInfo]:
         # 1. Obtain and deduplicate URLs from channels
         urls = []
         seen_noauth = set()
@@ -317,12 +325,15 @@ def _load_channels(self) -> Dict[str, _ChannelRepoInfo]:
             else partial(ThreadLimitedThreadPoolExecutor, max_workers=context.repodata_threads)
         )
         with Executor() as executor:
-            jsons = {url: str(path) for (url, path) in executor.map(self._fetch_channel, urls)}
+            jsons = {
+                url: (path, overlay)
+                for (url, path, overlay) in executor.map(self._fetch_channel, urls)
+            }
 
         # 3. Create repos in same order as `urls`
         index = {}
         for url in urls:
-            info = self._json_path_to_repo_info(url, jsons[url])
+            info = self._json_path_to_repo_info(url, *jsons[url])
             if info is not None:
                 index[info.noauth_url] = info
 
@@ -337,24 +348,22 @@ def _load_installed(self, records: Iterable[PackageRecord]) -> api.Repo:
         return repo
 
     def whoneeds(
-        self, query: Union[str, MatchSpec], records=True
-    ) -> Union[Iterable[PackageRecord], dict, str]:
+        self, query: str | MatchSpec, records=True
+    ) -> Iterable[PackageRecord] | dict | str:
         result_str = self._query.whoneeds(self._prepare_query(query), self._format)
         if self._format == api.QueryFormat.JSON:
             return self._process_query_result(result_str, records=records)
         return result_str
 
     def depends(
-        self, query: Union[str, MatchSpec], records=True
-    ) -> Union[Iterable[PackageRecord], dict, str]:
+        self, query: str | MatchSpec, records=True
+    ) -> Iterable[PackageRecord] | dict | str:
         result_str = self._query.depends(self._prepare_query(query), self._format)
         if self._format == api.QueryFormat.JSON:
             return self._process_query_result(result_str, records=records)
         return result_str
 
-    def search(
-        self, query: Union[str, MatchSpec], records=True
-    ) -> Union[Iterable[PackageRecord], dict, str]:
+    def search(self, query: str | MatchSpec, records=True) -> Iterable[PackageRecord] | dict | str:
         result_str = self._query.find(self._prepare_query(query), self._format)
         if self._format == api.QueryFormat.JSON:
             return self._process_query_result(result_str, records=records)
@@ -371,7 +380,7 @@ def explicit_pool(self, specs: Iterable[MatchSpec]) -> Iterable[str]:
                 explicit_pool.add(record.name)
         return tuple(explicit_pool)
 
-    def _prepare_query(self, query: Union[str, MatchSpec]) -> str:
+    def _prepare_query(self, query: str | MatchSpec) -> str:
         if isinstance(query, str):
             if "[" not in query:
                 return query
@@ -398,7 +407,7 @@ def _process_query_result(
         self,
         result_str,
         records=True,
-    ) -> Union[Iterable[PackageRecord], dict]:
+    ) -> Iterable[PackageRecord] | dict:
         result = json_load(result_str)
         if result.get("result", {}).get("status") != "OK":
             query_type = result.get("query", {}).get("type", "")
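
For readers trying the same integration outside this module, a minimal standalone sketch of the pattern both commits apply follows. It assumes a libmambapy build with the two-file api.Repo() overload from https://github.com/mamba-org/mamba/pull/2969 and a conda whose repo_fetch may expose fetch_latest_path_and_overlay(); the load_channel_repo helper name and the plain noauth_url last argument are illustrative simplifications, not part of the patches.

# Illustrative sketch only; not part of the patches above.
from pathlib import Path

import libmambapy as api
from conda.core.subdir_data import SubdirData
from conda.models.channel import Channel


def load_channel_repo(pool: api.Pool, url: str, repodata_fn: str = "repodata.json") -> api.Repo:
    """Fetch repodata for a subdir URL and load it into a libmamba Repo."""
    channel = Channel.from_url(url)
    noauth_url = channel.urls(with_credentials=False, subdirs=(channel.subdir,))[0]
    repo_fetch = SubdirData(channel, repodata_fn=repodata_fn).repo_fetch

    overlay_path = None
    if hasattr(repo_fetch, "fetch_latest_path_and_overlay"):
        # Newer conda: base repodata.json plus a separate overlay file.
        json_path, overlay_path, _ = repo_fetch.fetch_latest_path_and_overlay()
    else:
        # Older conda: a single repodata.json path.
        json_path, _ = repo_fetch.fetch_latest_path()

    if overlay_path:
        # Two-file form assumed to be available via mamba PR 2969.
        return api.Repo(pool, noauth_url, str(Path(json_path)), str(overlay_path), noauth_url)
    # Single-file form, unchanged behaviour.
    return api.Repo(pool, noauth_url, str(Path(json_path)), noauth_url)

In the module itself the final argument is escape_channel_url(noauth_url) and the resulting Repo is wrapped in _ChannelRepoInfo, as the diff above shows.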