From 8fca1d6dfb7c0c7acaec9e5db9be78807f06c7f9 Mon Sep 17 00:00:00 2001 From: IlyasMoutawwakil Date: Tue, 23 Apr 2024 15:59:41 +0200 Subject: [PATCH 1/6] added token argument --- optimum/exporters/openvino/__main__.py | 3 +++ optimum/intel/generation/modeling.py | 7 ++++++- optimum/intel/ipex/modeling_base.py | 6 +++++- optimum/intel/neural_compressor/modeling_base.py | 5 ++++- optimum/intel/neural_compressor/quantization.py | 4 +++- optimum/intel/openvino/loaders.py | 3 +++ optimum/intel/openvino/modeling.py | 4 ++++ optimum/intel/openvino/modeling_base.py | 10 +++++++++- optimum/intel/openvino/modeling_base_seq2seq.py | 4 ++++ optimum/intel/openvino/modeling_decoder.py | 6 +++++- optimum/intel/openvino/modeling_diffusion.py | 5 +++++ optimum/intel/openvino/quantization.py | 4 +++- 12 files changed, 54 insertions(+), 7 deletions(-) diff --git a/optimum/exporters/openvino/__main__.py b/optimum/exporters/openvino/__main__.py index d7b29584d6..c070c33ae3 100644 --- a/optimum/exporters/openvino/__main__.py +++ b/optimum/exporters/openvino/__main__.py @@ -56,6 +56,7 @@ def main_export( force_download: bool = False, local_files_only: bool = False, use_auth_token: Optional[Union[bool, str]] = None, + token: Optional[Union[bool, str]] = None, model_kwargs: Optional[Dict[str, Any]] = None, custom_export_configs: Optional[Dict[str, "OnnxConfig"]] = None, fn_get_submodels: Optional[Callable] = None, @@ -196,6 +197,7 @@ def main_export( revision=revision, cache_dir=cache_dir, use_auth_token=use_auth_token, + token=token, local_files_only=local_files_only, force_download=force_download, trust_remote_code=trust_remote_code, @@ -268,6 +270,7 @@ class StoreAttr(object): revision=revision, cache_dir=cache_dir, use_auth_token=use_auth_token, + token=token, local_files_only=local_files_only, force_download=force_download, trust_remote_code=trust_remote_code, diff --git a/optimum/intel/generation/modeling.py b/optimum/intel/generation/modeling.py index 3d9c657626..ac9dafd4d4 100644 
--- a/optimum/intel/generation/modeling.py +++ b/optimum/intel/generation/modeling.py @@ -353,7 +353,8 @@ def _from_pretrained( cls, model_id: Union[str, Path], config: PretrainedConfig, - use_auth_token: Optional[Union[bool, str, None]] = None, + use_auth_token: Optional[Union[bool, str]] = None, + token: Optional[Union[bool, str]] = None, revision: Optional[Union[str, None]] = None, force_download: bool = False, cache_dir: Optional[str] = None, @@ -376,6 +377,7 @@ def _from_pretrained( repo_id=model_id, filename=file_name, use_auth_token=use_auth_token, + token=token, revision=revision, cache_dir=cache_dir, force_download=force_download, @@ -398,6 +400,7 @@ def _from_transformers( model_id: str, config: PretrainedConfig, use_auth_token: Optional[Union[bool, str]] = None, + token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, force_download: bool = False, cache_dir: Optional[str] = None, @@ -414,6 +417,7 @@ def _from_transformers( model_kwargs = { "revision": revision, "use_auth_token": use_auth_token, + "token": token, "cache_dir": cache_dir, "subfolder": subfolder, "local_files_only": local_files_only, @@ -436,6 +440,7 @@ def _from_transformers( config=config, use_cache=use_cache, use_auth_token=use_auth_token, + token=token, revision=revision, force_download=force_download, cache_dir=cache_dir, diff --git a/optimum/intel/ipex/modeling_base.py b/optimum/intel/ipex/modeling_base.py index 8a7a4f2028..3f79c757c1 100644 --- a/optimum/intel/ipex/modeling_base.py +++ b/optimum/intel/ipex/modeling_base.py @@ -151,6 +151,7 @@ def _from_transformers( config: PretrainedConfig, use_cache: bool = True, use_auth_token: Optional[Union[bool, str]] = None, + token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, force_download: bool = False, cache_dir: Optional[str] = None, @@ -166,6 +167,7 @@ def _from_transformers( model_kwargs = { "revision": revision, "use_auth_token": use_auth_token, + "token": token, "cache_dir": cache_dir, 
"subfolder": subfolder, "local_files_only": local_files_only, @@ -187,7 +189,8 @@ def _from_pretrained( cls, model_id: Union[str, Path], config: PretrainedConfig, - use_auth_token: Optional[Union[bool, str, None]] = None, + use_auth_token: Optional[Union[bool, str]] = None, + token: Optional[Union[bool, str]] = None, revision: Optional[Union[str, None]] = None, force_download: bool = False, cache_dir: Optional[str] = None, @@ -211,6 +214,7 @@ def _from_pretrained( repo_id=model_id, filename=file_name, use_auth_token=use_auth_token, + token=token, revision=revision, cache_dir=cache_dir, force_download=force_download, diff --git a/optimum/intel/neural_compressor/modeling_base.py b/optimum/intel/neural_compressor/modeling_base.py index c46e3f41c5..133beb9318 100644 --- a/optimum/intel/neural_compressor/modeling_base.py +++ b/optimum/intel/neural_compressor/modeling_base.py @@ -101,7 +101,8 @@ def _from_pretrained( cls, model_id: Union[str, Path], config: PretrainedConfig, - use_auth_token: Optional[Union[bool, str, None]] = None, + use_auth_token: Optional[Union[bool, str]] = None, + token: Optional[Union[bool, str]] = None, revision: Optional[Union[str, None]] = None, force_download: bool = False, cache_dir: Optional[str] = None, @@ -126,6 +127,7 @@ def _from_pretrained( filename=file_name, subfolder=subfolder, use_auth_token=use_auth_token, + token=token, revision=revision, cache_dir=cache_dir, force_download=force_download, @@ -149,6 +151,7 @@ def _from_pretrained( return _BaseQBitsAutoModelClass.from_pretrained( pretrained_model_name_or_path=model_id, use_auth_token=use_auth_token, + token=token, revision=revision, force_download=force_download, cache_dir=cache_dir, diff --git a/optimum/intel/neural_compressor/quantization.py b/optimum/intel/neural_compressor/quantization.py index 09f651df05..730a56418f 100644 --- a/optimum/intel/neural_compressor/quantization.py +++ b/optimum/intel/neural_compressor/quantization.py @@ -446,7 +446,8 @@ def get_calibration_dataset( 
dataset_split: str = "train", preprocess_function: Optional[Callable] = None, preprocess_batch: bool = True, - use_auth_token: bool = False, + use_auth_token: Optional[Union[bool, str]] = None, + token: Optional[Union[bool, str]] = None, ) -> Dataset: """ Create the calibration `datasets.Dataset` to use for the post-training static quantization calibration step. @@ -475,6 +476,7 @@ def get_calibration_dataset( name=dataset_config_name, split=dataset_split, use_auth_token=use_auth_token, + token=token, ) if num_samples is not None: diff --git a/optimum/intel/openvino/loaders.py b/optimum/intel/openvino/loaders.py index 61d5755cfa..fe46b5f8ae 100644 --- a/optimum/intel/openvino/loaders.py +++ b/optimum/intel/openvino/loaders.py @@ -258,6 +258,7 @@ def load_textual_inversion( proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", HF_HUB_OFFLINE) use_auth_token = kwargs.pop("use_auth_token", None) + token = kwargs.pop("token", None) revision = kwargs.pop("revision", None) subfolder = kwargs.pop("subfolder", None) weight_name = kwargs.pop("weight_name", None) @@ -320,6 +321,7 @@ def load_textual_inversion( proxies=proxies, local_files_only=local_files_only, use_auth_token=use_auth_token, + token=token, revision=revision, subfolder=subfolder, user_agent=user_agent, @@ -341,6 +343,7 @@ def load_textual_inversion( proxies=proxies, local_files_only=local_files_only, use_auth_token=use_auth_token, + token=token, revision=revision, subfolder=subfolder, user_agent=user_agent, diff --git a/optimum/intel/openvino/modeling.py b/optimum/intel/openvino/modeling.py index 9c7c2b5258..48bcaf44e3 100644 --- a/optimum/intel/openvino/modeling.py +++ b/optimum/intel/openvino/modeling.py @@ -421,6 +421,7 @@ def _from_transformers( model_id: str, config: PretrainedConfig, use_auth_token: Optional[Union[bool, str]] = None, + token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, force_download: bool = False, cache_dir: Optional[str] = None, 
@@ -450,6 +451,7 @@ def _from_transformers( revision=revision, cache_dir=cache_dir, use_auth_token=use_auth_token, + token=token, local_files_only=local_files_only, force_download=force_download, trust_remote_code=trust_remote_code, @@ -583,6 +585,7 @@ def from_pretrained( export: bool = False, config: Optional["PretrainedConfig"] = None, use_auth_token: Optional[Union[bool, str]] = None, + token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, force_download: bool = False, cache_dir: Optional[str] = None, @@ -621,6 +624,7 @@ def from_pretrained( config=config, export=export, use_auth_token=use_auth_token, + token=token, revision=revision, force_download=force_download, cache_dir=cache_dir, diff --git a/optimum/intel/openvino/modeling_base.py b/optimum/intel/openvino/modeling_base.py index a48cdf5c92..493c0ccbad 100644 --- a/optimum/intel/openvino/modeling_base.py +++ b/optimum/intel/openvino/modeling_base.py @@ -168,7 +168,8 @@ def _from_pretrained( cls, model_id: Union[str, Path], config: PretrainedConfig, - use_auth_token: Optional[Union[bool, str, None]] = None, + use_auth_token: Optional[Union[bool, str]] = None, + token: Optional[Union[bool, str]] = None, revision: Optional[Union[str, None]] = None, force_download: bool = False, cache_dir: Optional[str] = None, @@ -215,6 +216,7 @@ def _from_pretrained( model_cache_path = cls._cached_file( model_path=model_path, use_auth_token=use_auth_token, + token=token, revision=revision, force_download=force_download, cache_dir=cache_dir, @@ -260,6 +262,7 @@ def _set_ov_config_parameters(self): def _cached_file( model_path: Union[Path, str], use_auth_token: Optional[Union[bool, str]] = None, + token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, force_download: bool = False, cache_dir: Optional[str] = None, @@ -283,6 +286,7 @@ def _cached_file( filename=file_name.as_posix(), subfolder=subfolder, use_auth_token=use_auth_token, + token=token, revision=revision, cache_dir=cache_dir, 
force_download=force_download, @@ -298,6 +302,7 @@ def _from_transformers( model_id: str, config: PretrainedConfig, use_auth_token: Optional[Union[bool, str]] = None, + token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, force_download: bool = False, cache_dir: Optional[str] = None, @@ -344,6 +349,7 @@ def _from_transformers( revision=revision, cache_dir=cache_dir, use_auth_token=use_auth_token, + token=token, local_files_only=local_files_only, force_download=force_download, trust_remote_code=trust_remote_code, @@ -366,6 +372,7 @@ def _to_load( config: PretrainedConfig, onnx_config: OnnxConfig, use_auth_token: Optional[Union[bool, str]] = None, + token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, force_download: bool = False, cache_dir: Optional[str] = None, @@ -390,6 +397,7 @@ def _to_load( config=config, from_onnx=False, use_auth_token=use_auth_token, + token=token, revision=revision, force_download=force_download, cache_dir=cache_dir, diff --git a/optimum/intel/openvino/modeling_base_seq2seq.py b/optimum/intel/openvino/modeling_base_seq2seq.py index 78648e93d2..ad551731e8 100644 --- a/optimum/intel/openvino/modeling_base_seq2seq.py +++ b/optimum/intel/openvino/modeling_base_seq2seq.py @@ -109,6 +109,7 @@ def _from_pretrained( model_id: Union[str, Path], config: PretrainedConfig, use_auth_token: Optional[Union[bool, str]] = None, + token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, force_download: bool = False, cache_dir: Optional[str] = None, @@ -191,6 +192,7 @@ def _from_pretrained( repo_id=model_id, filename=file_name, use_auth_token=use_auth_token, + token=token, revision=revision, cache_dir=cache_dir, force_download=force_download, @@ -220,6 +222,7 @@ def _from_transformers( model_id: str, config: PretrainedConfig, use_auth_token: Optional[Union[bool, str]] = None, + token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, force_download: bool = False, cache_dir: 
Optional[str] = None, @@ -273,6 +276,7 @@ def _from_transformers( revision=revision, cache_dir=cache_dir, use_auth_token=use_auth_token, + token=token, local_files_only=local_files_only, force_download=force_download, trust_remote_code=trust_remote_code, diff --git a/optimum/intel/openvino/modeling_decoder.py b/optimum/intel/openvino/modeling_decoder.py index 39a7bee9a2..1a84c06fae 100644 --- a/optimum/intel/openvino/modeling_decoder.py +++ b/optimum/intel/openvino/modeling_decoder.py @@ -219,6 +219,7 @@ def _from_transformers( model_id: str, config: PretrainedConfig, use_auth_token: Optional[Union[bool, str]] = None, + token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, force_download: bool = False, cache_dir: Optional[str] = None, @@ -255,6 +256,7 @@ def _from_transformers( revision=revision, cache_dir=cache_dir, use_auth_token=use_auth_token, + token=token, local_files_only=local_files_only, force_download=force_download, trust_remote_code=trust_remote_code, @@ -562,7 +564,8 @@ def _from_pretrained( cls, model_id: Union[str, Path], config: PretrainedConfig, - use_auth_token: Optional[Union[bool, str, None]] = None, + use_auth_token: Optional[Union[bool, str]] = None, + token: Optional[Union[bool, str]] = None, revision: Optional[Union[str, None]] = None, force_download: bool = False, cache_dir: Optional[str] = None, @@ -581,6 +584,7 @@ def _from_pretrained( model_cache_path = cls._cached_file( model_path=model_path, use_auth_token=use_auth_token, + token=token, revision=revision, force_download=force_download, cache_dir=cache_dir, diff --git a/optimum/intel/openvino/modeling_diffusion.py b/optimum/intel/openvino/modeling_diffusion.py index fb9bec7a8e..86575ad207 100644 --- a/optimum/intel/openvino/modeling_diffusion.py +++ b/optimum/intel/openvino/modeling_diffusion.py @@ -207,6 +207,7 @@ def _from_pretrained( model_id: Union[str, Path], config: Dict[str, Any], use_auth_token: Optional[Union[bool, str]] = None, + token: Optional[Union[bool, 
str]] = None, revision: Optional[str] = None, cache_dir: Optional[str] = None, vae_decoder_file_name: Optional[str] = None, @@ -260,6 +261,7 @@ def _from_pretrained( cache_dir=cache_dir, local_files_only=local_files_only, use_auth_token=use_auth_token, + token=token, revision=revision, allow_patterns=allow_patterns, ignore_patterns=ignore_patterns, @@ -398,6 +400,7 @@ def _from_transformers( model_id: str, config: Dict[str, Any], use_auth_token: Optional[Union[bool, str]] = None, + token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, force_download: bool = False, cache_dir: Optional[str] = None, @@ -428,6 +431,7 @@ def _from_transformers( revision=revision, cache_dir=cache_dir, use_auth_token=use_auth_token, + token=token, local_files_only=local_files_only, force_download=force_download, ov_config=ov_config, @@ -438,6 +442,7 @@ def _from_transformers( config=config, from_onnx=False, use_auth_token=use_auth_token, + token=token, revision=revision, force_download=force_download, cache_dir=cache_dir, diff --git a/optimum/intel/openvino/quantization.py b/optimum/intel/openvino/quantization.py index 217e5e4056..84ec817c1a 100644 --- a/optimum/intel/openvino/quantization.py +++ b/optimum/intel/openvino/quantization.py @@ -540,7 +540,8 @@ def get_calibration_dataset( dataset_split: str = "train", preprocess_function: Optional[Callable] = None, preprocess_batch: bool = True, - use_auth_token: bool = False, + use_auth_token: Optional[Union[bool, str]] = None, + token: Optional[Union[bool, str]] = None, cache_dir: Optional[str] = None, ) -> datasets.Dataset: """ @@ -576,6 +577,7 @@ def get_calibration_dataset( name=dataset_config_name, split=dataset_split, use_auth_token=use_auth_token, + token=token, cache_dir=cache_dir, ) From c2a08c4b68ebb33ad2a5f60a68792c30a652893a Mon Sep 17 00:00:00 2001 From: IlyasMoutawwakil Date: Tue, 23 Apr 2024 17:28:00 +0200 Subject: [PATCH 2/6] updated docstrings --- optimum/exporters/openvino/__main__.py | 6 ++++-- 
optimum/intel/neural_compressor/quantization.py | 7 +++++-- optimum/intel/openvino/loaders.py | 8 +++++--- optimum/intel/openvino/modeling_base.py | 15 ++++++++++----- optimum/intel/openvino/modeling_base_seq2seq.py | 15 ++++++++++----- optimum/intel/openvino/quantization.py | 7 +++++-- 6 files changed, 39 insertions(+), 19 deletions(-) diff --git a/optimum/exporters/openvino/__main__.py b/optimum/exporters/openvino/__main__.py index 83440e029b..00fc48e382 100644 --- a/optimum/exporters/openvino/__main__.py +++ b/optimum/exporters/openvino/__main__.py @@ -108,9 +108,11 @@ def main_export( cached versions if they exist. local_files_only (`Optional[bool]`, defaults to `False`): Whether or not to only look at local files (i.e., do not try to download the model). - use_auth_token (`Optional[str]`, defaults to `None`): + use_auth_token (Optional[Union[bool, str]], defaults to `None`): + Deprecated. Please use `token` instead. + token (Optional[Union[bool, str]], defaults to `None`): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated - when running `transformers-cli login` (stored in `~/.huggingface`). + when running `huggingface-cli login` (stored in `~/.huggingface`). model_kwargs (`Optional[Dict[str, Any]]`, defaults to `None`): Experimental usage: keyword arguments to pass to the model during the export. This argument should be used along the `custom_export_configs` argument diff --git a/optimum/intel/neural_compressor/quantization.py b/optimum/intel/neural_compressor/quantization.py index 5bc903e39f..2d2e25c33a 100644 --- a/optimum/intel/neural_compressor/quantization.py +++ b/optimum/intel/neural_compressor/quantization.py @@ -466,8 +466,11 @@ def get_calibration_dataset( Processing function to apply to each example after loading dataset. preprocess_batch (`bool`, defaults to `True`): Whether the `preprocess_function` should be batched. 
- use_auth_token (`bool`, defaults to `False`): - Whether to use the token generated when running `transformers-cli login`. + use_auth_token (Optional[Union[bool, str]], defaults to `None`): + Deprecated. Please use `token` instead. + token (Optional[Union[bool, str]], defaults to `None`): + The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated + when running `huggingface-cli login` (stored in `~/.huggingface`). Returns: The calibration `datasets.Dataset` to use for the post-training static quantization calibration step. """ diff --git a/optimum/intel/openvino/loaders.py b/optimum/intel/openvino/loaders.py index 1d4eb4a0f6..1cc138f4df 100644 --- a/optimum/intel/openvino/loaders.py +++ b/optimum/intel/openvino/loaders.py @@ -188,9 +188,11 @@ def load_textual_inversion( local_files_only (`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. - use_auth_token (`str` or *bool*, *optional*): - The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from - `diffusers-cli login` (stored in `~/.huggingface`) is used. + use_auth_token (Optional[Union[bool, str]], defaults to `None`): + Deprecated. Please use `token` instead. + token (Optional[Union[bool, str]], defaults to `None`): + The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated + when running `huggingface-cli login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. 
diff --git a/optimum/intel/openvino/modeling_base.py b/optimum/intel/openvino/modeling_base.py index d359a8f8bf..24c50d7ef6 100644 --- a/optimum/intel/openvino/modeling_base.py +++ b/optimum/intel/openvino/modeling_base.py @@ -191,9 +191,11 @@ def _from_pretrained( Can be either: - The model id of a pretrained model hosted inside a model repo on huggingface.co. - The path to a directory containing the model weights. - use_auth_token (`str` or `bool`): - The token to use as HTTP bearer authorization for remote files. Needed to load models from a private - repository. + use_auth_token (Optional[Union[bool, str]], defaults to `None`): + Deprecated. Please use `token` instead. + token (Optional[Union[bool, str]], defaults to `None`): + The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated + when running `huggingface-cli login` (stored in `~/.huggingface`). revision (`str`, *optional*): The specific model version to use. It can be a branch name, a tag name, or a commit id. cache_dir (`Union[str, Path]`, *optional*): @@ -326,8 +328,11 @@ def _from_transformers( - The path to a directory containing the model weights. save_dir (`str` or `Path`): The directory where the exported ONNX model should be saved, default to `transformers.file_utils.default_cache_path`, which is the cache directory for transformers. - use_auth_token (`str` or `bool`): - Is needed to load models from a private repository + use_auth_token (`Optional[str]`, defaults to `None`): + Deprecated. Please use `token` instead. + token (Optional[Union[bool, str]], defaults to `None`): + The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated + when running `huggingface-cli login` (stored in `~/.huggingface`). revision (`str`): Revision is the specific model version to use. 
It can be a branch name, a tag name, or a commit id kwargs (`Dict`, *optional*): diff --git a/optimum/intel/openvino/modeling_base_seq2seq.py b/optimum/intel/openvino/modeling_base_seq2seq.py index a652bc00e3..d53541bb27 100644 --- a/optimum/intel/openvino/modeling_base_seq2seq.py +++ b/optimum/intel/openvino/modeling_base_seq2seq.py @@ -133,9 +133,11 @@ def _from_pretrained( Can be either: - The model id of a pretrained model hosted inside a model repo on huggingface.co. - The path to a directory containing the model weights. - use_auth_token (`str` or `bool`): - The token to use as HTTP bearer authorization for remote files. Needed to load models from a private - repository. + use_auth_token (Optional[Union[bool, str]], defaults to `None`): + Deprecated. Please use `token` instead. + token (Optional[Union[bool, str]], defaults to `None`): + The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated + when running `huggingface-cli login` (stored in `~/.huggingface`). revision (`str`): The specific model version to use. It can be a branch name, a tag name, or a commit id. force_download (`bool`, *optional*, defaults to `False`): @@ -248,8 +250,11 @@ def _from_transformers( save_dir (`str` or `Path`): The directory where the exported ONNX model should be saved, defaults to `transformers.file_utils.default_cache_path`, which is the cache directory for transformers. - use_auth_token (`str` or `bool`): - Is needed to load models from a private repository + use_auth_token (`Optional[str]`, defaults to `None`): + Deprecated. Please use `token` instead. + token (Optional[Union[bool, str]], defaults to `None`): + The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated + when running `huggingface-cli login` (stored in `~/.huggingface`). revision (`str`): Revision is the specific model version to use. 
It can be a branch name, a tag name, or a commit id kwargs (`Dict`, *optional*): diff --git a/optimum/intel/openvino/quantization.py b/optimum/intel/openvino/quantization.py index bcd62de77b..c6cc5f4afa 100644 --- a/optimum/intel/openvino/quantization.py +++ b/optimum/intel/openvino/quantization.py @@ -562,8 +562,11 @@ def get_calibration_dataset( Processing function to apply to each example after loading dataset. preprocess_batch (`bool`, defaults to `True`): Whether the `preprocess_function` should be batched. - use_auth_token (`bool`, defaults to `False`): - Whether to use the token generated when running `transformers-cli login`. + use_auth_token (Optional[Union[bool, str]], defaults to `None`): + Deprecated. Please use `token` instead. + token (Optional[Union[bool, str]], defaults to `None`): + The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated + when running `huggingface-cli login` (stored in `~/.huggingface`). cache_dir (`str`, *optional*): Caching directory for a calibration dataset. Returns: From 4100e2d35affda5cbae923206fbd804657ea21bc Mon Sep 17 00:00:00 2001 From: IlyasMoutawwakil Date: Tue, 23 Apr 2024 18:05:42 +0200 Subject: [PATCH 3/6] added deprecation warning --- optimum/exporters/openvino/__main__.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/optimum/exporters/openvino/__main__.py b/optimum/exporters/openvino/__main__.py index 00fc48e382..bb974176fe 100644 --- a/optimum/exporters/openvino/__main__.py +++ b/optimum/exporters/openvino/__main__.py @@ -141,6 +141,17 @@ def main_export( ``` """ + if use_auth_token is not None: + logger.warning( + "The `use_auth_token` argument is deprecated and will be removed soon. " + "Please use the `token` argument instead." 
+ ) + if token is not None: + raise ValueError("You cannot use both `use_auth_token` and `token` arguments at the same time.") + + token = use_auth_token + use_auth_token = None + if compression_option is not None: logger.warning( "The `compression_option` argument is deprecated and will be removed in optimum-intel v1.17.0. " From 4a8bddd516f78904ce274e48470f7775da2b5053 Mon Sep 17 00:00:00 2001 From: IlyasMoutawwakil Date: Wed, 24 Apr 2024 12:04:18 +0200 Subject: [PATCH 4/6] added more warning and swapped token with use_auth_token --- optimum/exporters/openvino/__main__.py | 2 - optimum/intel/generation/modeling.py | 25 ++++++++-- optimum/intel/ipex/modeling_base.py | 24 +++++++++- .../intel/neural_compressor/modeling_base.py | 13 ++++- .../intel/neural_compressor/quantization.py | 12 ++++- optimum/intel/openvino/loaders.py | 17 +++++-- optimum/intel/openvino/modeling.py | 24 +++++++++- optimum/intel/openvino/modeling_base.py | 48 +++++++++++++++++-- .../intel/openvino/modeling_base_seq2seq.py | 24 +++++++++- optimum/intel/openvino/modeling_decoder.py | 24 +++++++++- optimum/intel/openvino/modeling_diffusion.py | 25 ++++++++-- optimum/intel/openvino/quantization.py | 18 +++++-- 12 files changed, 225 insertions(+), 31 deletions(-) diff --git a/optimum/exporters/openvino/__main__.py b/optimum/exporters/openvino/__main__.py index bb974176fe..662f65a19b 100644 --- a/optimum/exporters/openvino/__main__.py +++ b/optimum/exporters/openvino/__main__.py @@ -210,7 +210,6 @@ def main_export( subfolder=subfolder, revision=revision, cache_dir=cache_dir, - use_auth_token=use_auth_token, token=token, local_files_only=local_files_only, force_download=force_download, @@ -283,7 +282,6 @@ class StoreAttr(object): subfolder=subfolder, revision=revision, cache_dir=cache_dir, - use_auth_token=use_auth_token, token=token, local_files_only=local_files_only, force_download=force_download, diff --git a/optimum/intel/generation/modeling.py b/optimum/intel/generation/modeling.py index 
fe08b6d119..7c1754aeb9 100644 --- a/optimum/intel/generation/modeling.py +++ b/optimum/intel/generation/modeling.py @@ -364,6 +364,17 @@ def _from_pretrained( use_cache: bool = True, **kwargs, ): + if use_auth_token is not None: + logger.warning( + "The `use_auth_token` argument is deprecated and will be removed soon. " + "Please use the `token` argument instead." + ) + if token is not None: + raise ValueError("You cannot use both `use_auth_token` and `token` arguments at the same time.") + + token = use_auth_token + use_auth_token = None + if not getattr(config, "torchscript", False): raise ValueError("`torchscript` should be set to True to load TorchScript model") @@ -377,7 +388,6 @@ def _from_pretrained( model_cache_path = hf_hub_download( repo_id=model_id, filename=file_name, - use_auth_token=use_auth_token, token=token, revision=revision, cache_dir=cache_dir, @@ -411,13 +421,23 @@ def _from_transformers( torch_dtype: Optional[Union[str, "torch.dtype"]] = None, **kwargs, ): + if use_auth_token is not None: + logger.warning( + "The `use_auth_token` argument is deprecated and will be removed soon. " + "Please use the `token` argument instead." 
+ ) + if token is not None: + raise ValueError("You cannot use both `use_auth_token` and `token` arguments at the same time.") + + token = use_auth_token + use_auth_token = None + if is_torch_version("<", "2.1.0"): raise ImportError("`torch>=2.0.0` is needed to trace your model") task = cls.export_feature model_kwargs = { "revision": revision, - "use_auth_token": use_auth_token, "token": token, "cache_dir": cache_dir, "subfolder": subfolder, @@ -440,7 +460,6 @@ def _from_transformers( model_id=save_dir_path, config=config, use_cache=use_cache, - use_auth_token=use_auth_token, token=token, revision=revision, force_download=force_download, diff --git a/optimum/intel/ipex/modeling_base.py b/optimum/intel/ipex/modeling_base.py index 6f74e3a7a8..b121b6164f 100644 --- a/optimum/intel/ipex/modeling_base.py +++ b/optimum/intel/ipex/modeling_base.py @@ -161,13 +161,23 @@ def _from_transformers( torch_dtype: Optional[Union[str, "torch.dtype"]] = None, trust_remote_code: bool = False, ): + if use_auth_token is not None: + logger.warning( + "The `use_auth_token` argument is deprecated and will be removed soon. " + "Please use the `token` argument instead." + ) + if token is not None: + raise ValueError("You cannot use both `use_auth_token` and `token` arguments at the same time.") + + token = use_auth_token + use_auth_token = None + if is_torch_version("<", "2.1.0"): raise ImportError("`torch>=2.0.0` is needed to trace your model") task = cls.export_feature model_kwargs = { "revision": revision, - "use_auth_token": use_auth_token, "token": token, "cache_dir": cache_dir, "subfolder": subfolder, @@ -200,6 +210,17 @@ def _from_pretrained( subfolder: str = "", **kwargs, ): + if use_auth_token is not None: + logger.warning( + "The `use_auth_token` argument is deprecated and will be removed soon. " + "Please use the `token` argument instead." 
+ ) + if token is not None: + raise ValueError("You cannot use both `use_auth_token` and `token` arguments at the same time.") + + token = use_auth_token + use_auth_token = None + if not getattr(config, "torchscript", False): raise ValueError( "`config.torchscript` should be set to `True`, if your model is not a TorchScript model and needs to be traced please set `export=True` when loading it with `.from_pretrained()`" @@ -214,7 +235,6 @@ def _from_pretrained( model_cache_path = hf_hub_download( repo_id=model_id, filename=file_name, - use_auth_token=use_auth_token, token=token, revision=revision, cache_dir=cache_dir, diff --git a/optimum/intel/neural_compressor/modeling_base.py b/optimum/intel/neural_compressor/modeling_base.py index 0ad6ad1c66..fd4f4aa716 100644 --- a/optimum/intel/neural_compressor/modeling_base.py +++ b/optimum/intel/neural_compressor/modeling_base.py @@ -109,6 +109,17 @@ def _from_pretrained( trust_remote_code: bool = False, **kwargs, ): + if use_auth_token is not None: + logger.warning( + "The `use_auth_token` argument is deprecated and will be removed soon. " + "Please use the `token` argument instead." 
+ ) + if token is not None: + raise ValueError("You cannot use both `use_auth_token` and `token` arguments at the same time.") + + token = use_auth_token + use_auth_token = None + model_name_or_path = kwargs.pop("model_name_or_path", None) if model_name_or_path is not None: logger.warning("`model_name_or_path` is deprecated please use `model_id`") @@ -123,7 +134,6 @@ def _from_pretrained( repo_id=model_id, filename=file_name, subfolder=subfolder, - use_auth_token=use_auth_token, token=token, revision=revision, cache_dir=cache_dir, @@ -147,7 +157,6 @@ def _from_pretrained( return _BaseQBitsAutoModelClass.from_pretrained( pretrained_model_name_or_path=model_id, - use_auth_token=use_auth_token, token=token, revision=revision, force_download=force_download, diff --git a/optimum/intel/neural_compressor/quantization.py b/optimum/intel/neural_compressor/quantization.py index 2d2e25c33a..89abe7d58c 100644 --- a/optimum/intel/neural_compressor/quantization.py +++ b/optimum/intel/neural_compressor/quantization.py @@ -474,11 +474,21 @@ def get_calibration_dataset( Returns: The calibration `datasets.Dataset` to use for the post-training static quantization calibration step. """ + if use_auth_token is not None: + logger.warning( + "The `use_auth_token` argument is deprecated and will be removed soon. " + "Please use the `token` argument instead." 
+ ) + if token is not None: + raise ValueError("You cannot use both `use_auth_token` and `token` arguments at the same time.") + + token = use_auth_token + use_auth_token = None + calibration_dataset = load_dataset( dataset_name, name=dataset_config_name, split=dataset_split, - use_auth_token=use_auth_token, token=token, ) diff --git a/optimum/intel/openvino/loaders.py b/optimum/intel/openvino/loaders.py index 1cc138f4df..ff50c9391a 100644 --- a/optimum/intel/openvino/loaders.py +++ b/optimum/intel/openvino/loaders.py @@ -266,6 +266,17 @@ def load_textual_inversion( weight_name = kwargs.pop("weight_name", None) use_safetensors = kwargs.pop("use_safetensors", None) + if use_auth_token is not None: + logger.warning( + "The `use_auth_token` argument is deprecated and will be removed soon. " + "Please use the `token` argument instead." + ) + if token is not None: + raise ValueError("You cannot use both `use_auth_token` and `token` arguments at the same time.") + + token = use_auth_token + use_auth_token = None + if use_safetensors and not is_safetensors_available(): raise ValueError( "`use_safetensors`=True but safetensors is not installed. 
Please install safetensors with `pip install safetensors" @@ -322,8 +333,7 @@ def load_textual_inversion( resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, - use_auth_token=use_auth_token, - token=token, + use_auth_token=token, # still uses use_auth_token revision=revision, subfolder=subfolder, user_agent=user_agent, @@ -344,8 +354,7 @@ def load_textual_inversion( resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, - use_auth_token=use_auth_token, - token=token, + use_auth_token=token, # still uses use_auth_token revision=revision, subfolder=subfolder, user_agent=user_agent, diff --git a/optimum/intel/openvino/modeling.py b/optimum/intel/openvino/modeling.py index 90b8e2c481..0599b82b52 100644 --- a/optimum/intel/openvino/modeling.py +++ b/optimum/intel/openvino/modeling.py @@ -434,6 +434,17 @@ def _from_transformers( quantization_config: Union[OVWeightQuantizationConfig, Dict] = None, **kwargs, ): + if use_auth_token is not None: + logger.warning( + "The `use_auth_token` argument is deprecated and will be removed soon. " + "Please use the `token` argument instead." + ) + if token is not None: + raise ValueError("You cannot use both `use_auth_token` and `token` arguments at the same time.") + + token = use_auth_token + use_auth_token = None + save_dir = TemporaryDirectory() save_dir_path = Path(save_dir.name) @@ -451,7 +462,6 @@ def _from_transformers( subfolder=subfolder, revision=revision, cache_dir=cache_dir, - use_auth_token=use_auth_token, token=token, local_files_only=local_files_only, force_download=force_download, @@ -596,6 +606,17 @@ def from_pretrained( trust_remote_code: bool = False, **kwargs, ): + if use_auth_token is not None: + logger.warning( + "The `use_auth_token` argument is deprecated and will be removed soon. " + "Please use the `token` argument instead." 
+ ) + if token is not None: + raise ValueError("You cannot use both `use_auth_token` and `token` arguments at the same time.") + + token = use_auth_token + use_auth_token = None + # Fix the mismatch between timm_config and huggingface_config local_timm_model = _is_timm_ov_dir(model_id) if local_timm_model or (not os.path.isdir(model_id) and model_info(model_id).library_name == "timm"): @@ -624,7 +645,6 @@ def from_pretrained( model_id=model_id, config=config, export=export, - use_auth_token=use_auth_token, token=token, revision=revision, force_download=force_download, diff --git a/optimum/intel/openvino/modeling_base.py b/optimum/intel/openvino/modeling_base.py index 24c50d7ef6..6af38b442a 100644 --- a/optimum/intel/openvino/modeling_base.py +++ b/optimum/intel/openvino/modeling_base.py @@ -212,13 +212,23 @@ def _from_pretrained( load_in_8bit (`bool`, *optional*, defaults to `False`): Whether or not to apply 8-bit weight quantization. """ + if use_auth_token is not None: + logger.warning( + "The `use_auth_token` argument is deprecated and will be removed soon. " + "Please use the `token` argument instead." + ) + if token is not None: + raise ValueError("You cannot use both `use_auth_token` and `token` arguments at the same time.") + + token = use_auth_token + use_auth_token = None + model_path = Path(model_id) default_file_name = ONNX_WEIGHTS_NAME if from_onnx else OV_XML_FILE_NAME file_name = file_name or default_file_name model_cache_path = cls._cached_file( model_path=model_path, - use_auth_token=use_auth_token, token=token, revision=revision, force_download=force_download, @@ -273,6 +283,17 @@ def _cached_file( subfolder: str = "", local_files_only: bool = False, ): + if use_auth_token is not None: + logger.warning( + "The `use_auth_token` argument is deprecated and will be removed soon. " + "Please use the `token` argument instead." 
+ ) + if token is not None: + raise ValueError("You cannot use both `use_auth_token` and `token` arguments at the same time.") + + token = use_auth_token + use_auth_token = None + # locates a file in a local folder and repo, downloads and cache it if necessary. model_path = Path(model_path) if model_path.is_dir(): @@ -288,7 +309,6 @@ def _cached_file( repo_id=model_path.as_posix(), filename=file_name.as_posix(), subfolder=subfolder, - use_auth_token=use_auth_token, token=token, revision=revision, cache_dir=cache_dir, @@ -338,6 +358,17 @@ def _from_transformers( kwargs (`Dict`, *optional*): kwargs will be passed to the model during initialization """ + if use_auth_token is not None: + logger.warning( + "The `use_auth_token` argument is deprecated and will be removed soon. " + "Please use the `token` argument instead." + ) + if token is not None: + raise ValueError("You cannot use both `use_auth_token` and `token` arguments at the same time.") + + token = use_auth_token + use_auth_token = None + save_dir = TemporaryDirectory() save_dir_path = Path(save_dir.name) @@ -354,7 +385,6 @@ def _from_transformers( subfolder=subfolder, revision=revision, cache_dir=cache_dir, - use_auth_token=use_auth_token, token=token, local_files_only=local_files_only, force_download=force_download, @@ -386,6 +416,17 @@ def _to_load( stateful: bool = False, **kwargs, ): + if use_auth_token is not None: + logger.warning( + "The `use_auth_token` argument is deprecated and will be removed soon. " + "Please use the `token` argument instead." 
+ ) + if token is not None: + raise ValueError("You cannot use both `use_auth_token` and `token` arguments at the same time.") + + token = use_auth_token + use_auth_token = None + save_dir = TemporaryDirectory() save_dir_path = Path(save_dir.name) @@ -402,7 +443,6 @@ def _to_load( model_id=save_dir_path, config=config, from_onnx=False, - use_auth_token=use_auth_token, token=token, revision=revision, force_download=force_download, diff --git a/optimum/intel/openvino/modeling_base_seq2seq.py b/optimum/intel/openvino/modeling_base_seq2seq.py index d53541bb27..7f08bdb4da 100644 --- a/optimum/intel/openvino/modeling_base_seq2seq.py +++ b/optimum/intel/openvino/modeling_base_seq2seq.py @@ -158,6 +158,17 @@ def _from_pretrained( local_files_only(`bool`, *optional*, defaults to `False`): Whether or not to only look at local files (i.e., do not try to download the model). """ + if use_auth_token is not None: + logger.warning( + "The `use_auth_token` argument is deprecated and will be removed soon. " + "Please use the `token` argument instead." + ) + if token is not None: + raise ValueError("You cannot use both `use_auth_token` and `token` arguments at the same time.") + + token = use_auth_token + use_auth_token = None + default_encoder_file_name = ONNX_ENCODER_NAME if from_onnx else OV_ENCODER_NAME default_decoder_file_name = ONNX_DECODER_NAME if from_onnx else OV_DECODER_NAME default_decoder_with_past_file_name = ONNX_DECODER_WITH_PAST_NAME if from_onnx else OV_DECODER_WITH_PAST_NAME @@ -194,7 +205,6 @@ def _from_pretrained( model_cache_path = hf_hub_download( repo_id=model_id, filename=file_name, - use_auth_token=use_auth_token, token=token, revision=revision, cache_dir=cache_dir, @@ -260,6 +270,17 @@ def _from_transformers( kwargs (`Dict`, *optional*): kwargs will be passed to the model during initialization """ + if use_auth_token is not None: + logger.warning( + "The `use_auth_token` argument is deprecated and will be removed soon. 
" + "Please use the `token` argument instead." + ) + if token is not None: + raise ValueError("You cannot use both `use_auth_token` and `token` arguments at the same time.") + + token = use_auth_token + use_auth_token = None + save_dir = TemporaryDirectory() save_dir_path = Path(save_dir.name) @@ -281,7 +302,6 @@ def _from_transformers( subfolder=subfolder, revision=revision, cache_dir=cache_dir, - use_auth_token=use_auth_token, token=token, local_files_only=local_files_only, force_download=force_download, diff --git a/optimum/intel/openvino/modeling_decoder.py b/optimum/intel/openvino/modeling_decoder.py index 7f0d6e7e99..cca10cea72 100644 --- a/optimum/intel/openvino/modeling_decoder.py +++ b/optimum/intel/openvino/modeling_decoder.py @@ -233,6 +233,17 @@ def _from_transformers( quantization_config: Optional[Union[OVWeightQuantizationConfig, Dict]] = None, **kwargs, ): + if use_auth_token is not None: + logger.warning( + "The `use_auth_token` argument is deprecated and will be removed soon. " + "Please use the `token` argument instead." + ) + if token is not None: + raise ValueError("You cannot use both `use_auth_token` and `token` arguments at the same time.") + + token = use_auth_token + use_auth_token = None + save_dir = TemporaryDirectory() save_dir_path = Path(save_dir.name) @@ -256,7 +267,6 @@ def _from_transformers( subfolder=subfolder, revision=revision, cache_dir=cache_dir, - use_auth_token=use_auth_token, token=token, local_files_only=local_files_only, force_download=force_download, @@ -578,13 +588,23 @@ def _from_pretrained( quantization_config: Optional[Union[OVWeightQuantizationConfig, Dict]] = None, **kwargs, ): + if use_auth_token is not None: + logger.warning( + "The `use_auth_token` argument is deprecated and will be removed soon. " + "Please use the `token` argument instead." 
+ ) + if token is not None: + raise ValueError("You cannot use both `use_auth_token` and `token` arguments at the same time.") + + token = use_auth_token + use_auth_token = None + model_path = Path(model_id) default_file_name = ONNX_WEIGHTS_NAME if from_onnx else OV_XML_FILE_NAME file_name = file_name or default_file_name model_cache_path = cls._cached_file( model_path=model_path, - use_auth_token=use_auth_token, token=token, revision=revision, force_download=force_download, diff --git a/optimum/intel/openvino/modeling_diffusion.py b/optimum/intel/openvino/modeling_diffusion.py index afc3c4e13e..9cc598d3ab 100644 --- a/optimum/intel/openvino/modeling_diffusion.py +++ b/optimum/intel/openvino/modeling_diffusion.py @@ -223,6 +223,17 @@ def _from_pretrained( quantization_config: Union[OVWeightQuantizationConfig, Dict] = None, **kwargs, ): + if use_auth_token is not None: + logger.warning( + "The `use_auth_token` argument is deprecated and will be removed soon. " + "Please use the `token` argument instead." + ) + if token is not None: + raise ValueError("You cannot use both `use_auth_token` and `token` arguments at the same time.") + + token = use_auth_token + use_auth_token = None + default_file_name = ONNX_WEIGHTS_NAME if from_onnx else OV_XML_FILE_NAME vae_decoder_file_name = vae_decoder_file_name or default_file_name text_encoder_file_name = text_encoder_file_name or default_file_name @@ -261,7 +272,6 @@ def _from_pretrained( model_id, cache_dir=cache_dir, local_files_only=local_files_only, - use_auth_token=use_auth_token, token=token, revision=revision, allow_patterns=allow_patterns, @@ -414,6 +424,17 @@ def _from_transformers( quantization_config: Union[OVWeightQuantizationConfig, Dict] = None, **kwargs, ): + if use_auth_token is not None: + logger.warning( + "The `use_auth_token` argument is deprecated and will be removed soon. " + "Please use the `token` argument instead." 
+ ) + if token is not None: + raise ValueError("You cannot use both `use_auth_token` and `token` arguments at the same time.") + + token = use_auth_token + use_auth_token = None + save_dir = TemporaryDirectory() save_dir_path = Path(save_dir.name) @@ -431,7 +452,6 @@ def _from_transformers( no_post_process=True, revision=revision, cache_dir=cache_dir, - use_auth_token=use_auth_token, token=token, local_files_only=local_files_only, force_download=force_download, @@ -442,7 +462,6 @@ def _from_transformers( model_id=save_dir_path, config=config, from_onnx=False, - use_auth_token=use_auth_token, token=token, revision=revision, force_download=force_download, diff --git a/optimum/intel/openvino/quantization.py b/optimum/intel/openvino/quantization.py index c6cc5f4afa..c1e4084289 100644 --- a/optimum/intel/openvino/quantization.py +++ b/optimum/intel/openvino/quantization.py @@ -477,9 +477,9 @@ def _quantize_torchmodel( subset_size=quantization_config.num_samples, ignored_scope=quantization_config.get_ignored_scope_instance(), model_type=nncf.ModelType(quantization_config.model_type), - preset=nncf.QuantizationPreset.PERFORMANCE - if quantization_config.sym - else nncf.QuantizationPreset.MIXED, + preset=( + nncf.QuantizationPreset.PERFORMANCE if quantization_config.sym else nncf.QuantizationPreset.MIXED + ), fast_bias_correction=quantization_config.fast_bias_correction, advanced_parameters=nncf.AdvancedQuantizationParameters( overflow_fix=OverflowFix(quantization_config.overflow_fix) @@ -572,6 +572,17 @@ def get_calibration_dataset( Returns: The calibration `datasets.Dataset` to use for the post-training static quantization calibration step. """ + if use_auth_token is not None: + logger.warning( + "The `use_auth_token` argument is deprecated and will be removed soon. " + "Please use the `token` argument instead." 
+ ) + if token is not None: + raise ValueError("You cannot use both `use_auth_token` and `token` arguments at the same time.") + + token = use_auth_token + use_auth_token = None + if not is_datasets_available(): raise ValueError(DATASETS_IMPORT_ERROR.format("OVQuantizer.get_calibration_dataset")) from datasets import load_dataset @@ -580,7 +591,6 @@ def get_calibration_dataset( dataset_name, name=dataset_config_name, split=dataset_split, - use_auth_token=use_auth_token, token=token, cache_dir=cache_dir, ) From 69a29850113f82bfade6c37bf11adf13be42893d Mon Sep 17 00:00:00 2001 From: IlyasMoutawwakil Date: Fri, 26 Apr 2024 09:04:52 +0200 Subject: [PATCH 5/6] use future warning istead --- optimum/exporters/openvino/__main__.py | 9 +++-- optimum/intel/generation/modeling.py | 19 +++++----- optimum/intel/ipex/modeling_base.py | 27 +++++++------- .../intel/neural_compressor/modeling_base.py | 11 +++--- .../intel/neural_compressor/quantization.py | 9 +++-- optimum/intel/openvino/loaders.py | 9 +++-- optimum/intel/openvino/modeling.py | 17 ++++----- optimum/intel/openvino/modeling_base.py | 35 ++++++++----------- .../intel/openvino/modeling_base_seq2seq.py | 17 ++++----- optimum/intel/openvino/modeling_decoder.py | 17 ++++----- optimum/intel/openvino/modeling_diffusion.py | 17 ++++----- optimum/intel/openvino/quantization.py | 9 +++-- 12 files changed, 85 insertions(+), 111 deletions(-) diff --git a/optimum/exporters/openvino/__main__.py b/optimum/exporters/openvino/__main__.py index 662f65a19b..8908c430b3 100644 --- a/optimum/exporters/openvino/__main__.py +++ b/optimum/exporters/openvino/__main__.py @@ -13,6 +13,7 @@ # limitations under the License. import logging +import warnings from pathlib import Path from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Union @@ -142,15 +143,13 @@ def main_export( """ if use_auth_token is not None: - logger.warning( - "The `use_auth_token` argument is deprecated and will be removed soon. 
" - "Please use the `token` argument instead." + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed soon. Please use the `token` argument instead.", + FutureWarning, ) if token is not None: raise ValueError("You cannot use both `use_auth_token` and `token` arguments at the same time.") - token = use_auth_token - use_auth_token = None if compression_option is not None: logger.warning( diff --git a/optimum/intel/generation/modeling.py b/optimum/intel/generation/modeling.py index 7c1754aeb9..d17e046c3b 100644 --- a/optimum/intel/generation/modeling.py +++ b/optimum/intel/generation/modeling.py @@ -15,6 +15,7 @@ import inspect import logging import os +import warnings from pathlib import Path from tempfile import TemporaryDirectory from typing import Optional, Tuple, Union @@ -356,7 +357,7 @@ def _from_pretrained( config: PretrainedConfig, use_auth_token: Optional[Union[bool, str]] = None, token: Optional[Union[bool, str]] = None, - revision: Optional[Union[str, None]] = None, + revision: Optional[str] = None, force_download: bool = False, cache_dir: str = HUGGINGFACE_HUB_CACHE, file_name: Optional[str] = WEIGHTS_NAME, @@ -365,15 +366,13 @@ def _from_pretrained( **kwargs, ): if use_auth_token is not None: - logger.warning( - "The `use_auth_token` argument is deprecated and will be removed soon. " - "Please use the `token` argument instead." + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed soon. 
Please use the `token` argument instead.", + FutureWarning, ) if token is not None: raise ValueError("You cannot use both `use_auth_token` and `token` arguments at the same time.") - token = use_auth_token - use_auth_token = None if not getattr(config, "torchscript", False): raise ValueError("`torchscript` should be set to True to load TorchScript model") @@ -422,15 +421,13 @@ def _from_transformers( **kwargs, ): if use_auth_token is not None: - logger.warning( - "The `use_auth_token` argument is deprecated and will be removed soon. " - "Please use the `token` argument instead." + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed soon. Please use the `token` argument instead.", + FutureWarning, ) if token is not None: raise ValueError("You cannot use both `use_auth_token` and `token` arguments at the same time.") - token = use_auth_token - use_auth_token = None if is_torch_version("<", "2.1.0"): raise ImportError("`torch>=2.0.0` is needed to trace your model") diff --git a/optimum/intel/ipex/modeling_base.py b/optimum/intel/ipex/modeling_base.py index b121b6164f..2b739ea502 100644 --- a/optimum/intel/ipex/modeling_base.py +++ b/optimum/intel/ipex/modeling_base.py @@ -15,6 +15,7 @@ import logging import os +import warnings from pathlib import Path from tempfile import TemporaryDirectory from typing import Optional, Tuple, Union @@ -162,15 +163,15 @@ def _from_transformers( trust_remote_code: bool = False, ): if use_auth_token is not None: - logger.warning( - "The `use_auth_token` argument is deprecated and will be removed soon. " - "Please use the `token` argument instead." + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. 
Please use `token` instead.", + FutureWarning, ) if token is not None: - raise ValueError("You cannot use both `use_auth_token` and `token` arguments at the same time.") - + raise ValueError( + "Both the arguments `use_auth_token` and `token` were specified, which is not supported. Please specify only `token`." + ) token = use_auth_token - use_auth_token = None if is_torch_version("<", "2.1.0"): raise ImportError("`torch>=2.0.0` is needed to trace your model") @@ -202,7 +203,7 @@ def _from_pretrained( config: PretrainedConfig, use_auth_token: Optional[Union[bool, str]] = None, token: Optional[Union[bool, str]] = None, - revision: Optional[Union[str, None]] = None, + revision: Optional[str] = None, force_download: bool = False, cache_dir: str = HUGGINGFACE_HUB_CACHE, file_name: Optional[str] = WEIGHTS_NAME, @@ -211,15 +212,15 @@ def _from_pretrained( **kwargs, ): if use_auth_token is not None: - logger.warning( - "The `use_auth_token` argument is deprecated and will be removed soon. " - "Please use the `token` argument instead." + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", + FutureWarning, ) if token is not None: - raise ValueError("You cannot use both `use_auth_token` and `token` arguments at the same time.") - + raise ValueError( + "Both the arguments `use_auth_token` and `token` were specified, which is not supported. Please specify only `token`." 
+ ) token = use_auth_token - use_auth_token = None if not getattr(config, "torchscript", False): raise ValueError( diff --git a/optimum/intel/neural_compressor/modeling_base.py b/optimum/intel/neural_compressor/modeling_base.py index fd4f4aa716..2556a6048e 100644 --- a/optimum/intel/neural_compressor/modeling_base.py +++ b/optimum/intel/neural_compressor/modeling_base.py @@ -14,6 +14,7 @@ import logging import os +import warnings from pathlib import Path from tempfile import TemporaryDirectory from typing import Dict, Optional, Union @@ -100,7 +101,7 @@ def _from_pretrained( config: PretrainedConfig, use_auth_token: Optional[Union[bool, str]] = None, token: Optional[Union[bool, str]] = None, - revision: Optional[Union[str, None]] = None, + revision: Optional[str] = None, force_download: bool = False, cache_dir: str = HUGGINGFACE_HUB_CACHE, file_name: str = WEIGHTS_NAME, @@ -110,15 +111,13 @@ def _from_pretrained( **kwargs, ): if use_auth_token is not None: - logger.warning( - "The `use_auth_token` argument is deprecated and will be removed soon. " - "Please use the `token` argument instead." + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed soon. 
Please use the `token` argument instead.", + FutureWarning, ) if token is not None: raise ValueError("You cannot use both `use_auth_token` and `token` arguments at the same time.") - token = use_auth_token - use_auth_token = None model_name_or_path = kwargs.pop("model_name_or_path", None) if model_name_or_path is not None: diff --git a/optimum/intel/neural_compressor/quantization.py b/optimum/intel/neural_compressor/quantization.py index 89abe7d58c..9ee4365930 100644 --- a/optimum/intel/neural_compressor/quantization.py +++ b/optimum/intel/neural_compressor/quantization.py @@ -16,6 +16,7 @@ import inspect import logging import types +import warnings from enum import Enum from itertools import chain from pathlib import Path @@ -475,15 +476,13 @@ def get_calibration_dataset( The calibration `datasets.Dataset` to use for the post-training static quantization calibration step. """ if use_auth_token is not None: - logger.warning( - "The `use_auth_token` argument is deprecated and will be removed soon. " - "Please use the `token` argument instead." + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed soon. Please use the `token` argument instead.", + FutureWarning, ) if token is not None: raise ValueError("You cannot use both `use_auth_token` and `token` arguments at the same time.") - token = use_auth_token - use_auth_token = None calibration_dataset = load_dataset( dataset_name, diff --git a/optimum/intel/openvino/loaders.py b/optimum/intel/openvino/loaders.py index ff50c9391a..fc5ae97495 100644 --- a/optimum/intel/openvino/loaders.py +++ b/optimum/intel/openvino/loaders.py @@ -13,6 +13,7 @@ # limitations under the License. import logging +import warnings from typing import Dict, List, Optional, Union import torch @@ -267,15 +268,13 @@ def load_textual_inversion( use_safetensors = kwargs.pop("use_safetensors", None) if use_auth_token is not None: - logger.warning( - "The `use_auth_token` argument is deprecated and will be removed soon. 
" - "Please use the `token` argument instead." + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed soon. Please use the `token` argument instead.", + FutureWarning, ) if token is not None: raise ValueError("You cannot use both `use_auth_token` and `token` arguments at the same time.") - token = use_auth_token - use_auth_token = None if use_safetensors and not is_safetensors_available(): raise ValueError( diff --git a/optimum/intel/openvino/modeling.py b/optimum/intel/openvino/modeling.py index 0599b82b52..1c907f2135 100644 --- a/optimum/intel/openvino/modeling.py +++ b/optimum/intel/openvino/modeling.py @@ -14,6 +14,7 @@ import logging import os +import warnings from pathlib import Path from tempfile import TemporaryDirectory from typing import Dict, Optional, Union @@ -435,15 +436,13 @@ def _from_transformers( **kwargs, ): if use_auth_token is not None: - logger.warning( - "The `use_auth_token` argument is deprecated and will be removed soon. " - "Please use the `token` argument instead." + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed soon. Please use the `token` argument instead.", + FutureWarning, ) if token is not None: raise ValueError("You cannot use both `use_auth_token` and `token` arguments at the same time.") - token = use_auth_token - use_auth_token = None save_dir = TemporaryDirectory() save_dir_path = Path(save_dir.name) @@ -607,15 +606,13 @@ def from_pretrained( **kwargs, ): if use_auth_token is not None: - logger.warning( - "The `use_auth_token` argument is deprecated and will be removed soon. " - "Please use the `token` argument instead." + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed soon. 
Please use the `token` argument instead.", + FutureWarning, ) if token is not None: raise ValueError("You cannot use both `use_auth_token` and `token` arguments at the same time.") - token = use_auth_token - use_auth_token = None # Fix the mismatch between timm_config and huggingface_config local_timm_model = _is_timm_ov_dir(model_id) diff --git a/optimum/intel/openvino/modeling_base.py b/optimum/intel/openvino/modeling_base.py index 6af38b442a..7937deea52 100644 --- a/optimum/intel/openvino/modeling_base.py +++ b/optimum/intel/openvino/modeling_base.py @@ -14,6 +14,7 @@ import logging import os +import warnings from pathlib import Path from tempfile import TemporaryDirectory, gettempdir from typing import Dict, Optional, Union @@ -171,7 +172,7 @@ def _from_pretrained( config: PretrainedConfig, use_auth_token: Optional[Union[bool, str]] = None, token: Optional[Union[bool, str]] = None, - revision: Optional[Union[str, None]] = None, + revision: Optional[str] = None, force_download: bool = False, cache_dir: str = HUGGINGFACE_HUB_CACHE, file_name: Optional[str] = None, @@ -213,15 +214,13 @@ def _from_pretrained( Whether or not to apply 8-bit weight quantization. """ if use_auth_token is not None: - logger.warning( - "The `use_auth_token` argument is deprecated and will be removed soon. " - "Please use the `token` argument instead." + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed soon. Please use the `token` argument instead.", + FutureWarning, ) if token is not None: raise ValueError("You cannot use both `use_auth_token` and `token` arguments at the same time.") - token = use_auth_token - use_auth_token = None model_path = Path(model_id) default_file_name = ONNX_WEIGHTS_NAME if from_onnx else OV_XML_FILE_NAME @@ -284,15 +283,13 @@ def _cached_file( local_files_only: bool = False, ): if use_auth_token is not None: - logger.warning( - "The `use_auth_token` argument is deprecated and will be removed soon. 
" - "Please use the `token` argument instead." + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed soon. Please use the `token` argument instead.", + FutureWarning, ) if token is not None: raise ValueError("You cannot use both `use_auth_token` and `token` arguments at the same time.") - token = use_auth_token - use_auth_token = None # locates a file in a local folder and repo, downloads and cache it if necessary. model_path = Path(model_path) @@ -359,15 +356,13 @@ def _from_transformers( kwargs will be passed to the model during initialization """ if use_auth_token is not None: - logger.warning( - "The `use_auth_token` argument is deprecated and will be removed soon. " - "Please use the `token` argument instead." + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed soon. Please use the `token` argument instead.", + FutureWarning, ) if token is not None: raise ValueError("You cannot use both `use_auth_token` and `token` arguments at the same time.") - token = use_auth_token - use_auth_token = None save_dir = TemporaryDirectory() save_dir_path = Path(save_dir.name) @@ -417,15 +412,13 @@ def _to_load( **kwargs, ): if use_auth_token is not None: - logger.warning( - "The `use_auth_token` argument is deprecated and will be removed soon. " - "Please use the `token` argument instead." + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed soon. 
Please use the `token` argument instead.", + FutureWarning, ) if token is not None: raise ValueError("You cannot use both `use_auth_token` and `token` arguments at the same time.") - token = use_auth_token - use_auth_token = None save_dir = TemporaryDirectory() save_dir_path = Path(save_dir.name) diff --git a/optimum/intel/openvino/modeling_base_seq2seq.py b/optimum/intel/openvino/modeling_base_seq2seq.py index 7f08bdb4da..fb53f9b2e2 100644 --- a/optimum/intel/openvino/modeling_base_seq2seq.py +++ b/optimum/intel/openvino/modeling_base_seq2seq.py @@ -14,6 +14,7 @@ import logging import os +import warnings from pathlib import Path from tempfile import TemporaryDirectory from typing import Dict, Optional, Union @@ -159,15 +160,13 @@ def _from_pretrained( Whether or not to only look at local files (i.e., do not try to download the model). """ if use_auth_token is not None: - logger.warning( - "The `use_auth_token` argument is deprecated and will be removed soon. " - "Please use the `token` argument instead." + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed soon. Please use the `token` argument instead.", + FutureWarning, ) if token is not None: raise ValueError("You cannot use both `use_auth_token` and `token` arguments at the same time.") - token = use_auth_token - use_auth_token = None default_encoder_file_name = ONNX_ENCODER_NAME if from_onnx else OV_ENCODER_NAME default_decoder_file_name = ONNX_DECODER_NAME if from_onnx else OV_DECODER_NAME @@ -271,15 +270,13 @@ def _from_transformers( kwargs will be passed to the model during initialization """ if use_auth_token is not None: - logger.warning( - "The `use_auth_token` argument is deprecated and will be removed soon. " - "Please use the `token` argument instead." + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed soon. 
Please use the `token` argument instead.", + FutureWarning, ) if token is not None: raise ValueError("You cannot use both `use_auth_token` and `token` arguments at the same time.") - token = use_auth_token - use_auth_token = None save_dir = TemporaryDirectory() save_dir_path = Path(save_dir.name) diff --git a/optimum/intel/openvino/modeling_decoder.py b/optimum/intel/openvino/modeling_decoder.py index cca10cea72..62a53f1c24 100644 --- a/optimum/intel/openvino/modeling_decoder.py +++ b/optimum/intel/openvino/modeling_decoder.py @@ -14,6 +14,7 @@ import logging import os +import warnings from pathlib import Path from tempfile import TemporaryDirectory from typing import Dict, Optional, Tuple, Union @@ -234,15 +235,13 @@ def _from_transformers( **kwargs, ): if use_auth_token is not None: - logger.warning( - "The `use_auth_token` argument is deprecated and will be removed soon. " - "Please use the `token` argument instead." + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed soon. Please use the `token` argument instead.", + FutureWarning, ) if token is not None: raise ValueError("You cannot use both `use_auth_token` and `token` arguments at the same time.") - token = use_auth_token - use_auth_token = None save_dir = TemporaryDirectory() save_dir_path = Path(save_dir.name) @@ -589,15 +588,13 @@ def _from_pretrained( **kwargs, ): if use_auth_token is not None: - logger.warning( - "The `use_auth_token` argument is deprecated and will be removed soon. " - "Please use the `token` argument instead." + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed soon. 
Please use the `token` argument instead.", + FutureWarning, ) if token is not None: raise ValueError("You cannot use both `use_auth_token` and `token` arguments at the same time.") - token = use_auth_token - use_auth_token = None model_path = Path(model_id) default_file_name = ONNX_WEIGHTS_NAME if from_onnx else OV_XML_FILE_NAME diff --git a/optimum/intel/openvino/modeling_diffusion.py b/optimum/intel/openvino/modeling_diffusion.py index 9cc598d3ab..2de7cb8154 100644 --- a/optimum/intel/openvino/modeling_diffusion.py +++ b/optimum/intel/openvino/modeling_diffusion.py @@ -16,6 +16,7 @@ import logging import os import shutil +import warnings from copy import deepcopy from pathlib import Path from tempfile import TemporaryDirectory, gettempdir @@ -224,15 +225,13 @@ def _from_pretrained( **kwargs, ): if use_auth_token is not None: - logger.warning( - "The `use_auth_token` argument is deprecated and will be removed soon. " - "Please use the `token` argument instead." + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed soon. Please use the `token` argument instead.", + FutureWarning, ) if token is not None: raise ValueError("You cannot use both `use_auth_token` and `token` arguments at the same time.") - token = use_auth_token - use_auth_token = None default_file_name = ONNX_WEIGHTS_NAME if from_onnx else OV_XML_FILE_NAME vae_decoder_file_name = vae_decoder_file_name or default_file_name @@ -425,15 +424,13 @@ def _from_transformers( **kwargs, ): if use_auth_token is not None: - logger.warning( - "The `use_auth_token` argument is deprecated and will be removed soon. " - "Please use the `token` argument instead." + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed soon. 
Please use the `token` argument instead.", + FutureWarning, ) if token is not None: raise ValueError("You cannot use both `use_auth_token` and `token` arguments at the same time.") - token = use_auth_token - use_auth_token = None save_dir = TemporaryDirectory() save_dir_path = Path(save_dir.name) diff --git a/optimum/intel/openvino/quantization.py b/optimum/intel/openvino/quantization.py index c1e4084289..66b30cb045 100644 --- a/optimum/intel/openvino/quantization.py +++ b/optimum/intel/openvino/quantization.py @@ -16,6 +16,7 @@ import inspect import logging import os +import warnings from collections import deque from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union @@ -573,15 +574,13 @@ def get_calibration_dataset( The calibration `datasets.Dataset` to use for the post-training static quantization calibration step. """ if use_auth_token is not None: - logger.warning( - "The `use_auth_token` argument is deprecated and will be removed soon. " - "Please use the `token` argument instead." + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed soon. 
Please use the `token` argument instead.",
+            FutureWarning,
         )
         if token is not None:
             raise ValueError("You cannot use both `use_auth_token` and `token` arguments at the same time.")
-            token = use_auth_token
-            use_auth_token = None
 
         if not is_datasets_available():
             raise ValueError(DATASETS_IMPORT_ERROR.format("OVQuantizer.get_calibration_dataset"))

From a313f59c57541a6d568e09e49fdfd5f66a8b8858 Mon Sep 17 00:00:00 2001
From: IlyasMoutawwakil
Date: Fri, 26 Apr 2024 09:05:49 +0200
Subject: [PATCH 6/6] added a test to read from private repo

---
 tests/openvino/test_modeling.py | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/tests/openvino/test_modeling.py b/tests/openvino/test_modeling.py
index f84cac8161..a463f40c0e 100644
--- a/tests/openvino/test_modeling.py
+++ b/tests/openvino/test_modeling.py
@@ -14,6 +14,7 @@
 
 import gc
 import os
+import subprocess
 import tempfile
 import time
 import unittest
@@ -247,6 +248,15 @@ def test_load_from_hub_and_save_stable_diffusion_model(self):
         del pipeline
         gc.collect()
 
+    def test_load_model_from_hub_private_with_token(self):
+        subprocess.run("huggingface-cli logout", shell=True)
+
+        # fine-grained read-only token for private repo "IlyasMoutawwakil/test-hub-bert";
+        # read from the environment — never hard-code secrets in source control
+        token = os.environ.get("HF_READ_TOKEN", "")
+
+        loaded_model = OVModelForMaskedLM.from_pretrained("IlyasMoutawwakil/test-hub-bert", use_auth_token=token)
+        self.assertIsInstance(loaded_model.config, PretrainedConfig)
 
 class OVModelForSequenceClassificationIntegrationTest(unittest.TestCase):
     SUPPORTED_ARCHITECTURES = (