diff --git a/src/peft/tuners/lora/layer.py b/src/peft/tuners/lora/layer.py
index 20bef8ed10..bd5a16a783 100644
--- a/src/peft/tuners/lora/layer.py
+++ b/src/peft/tuners/lora/layer.py
@@ -67,6 +67,8 @@ def __init__(self, base_layer: nn.Module, ephemeral_gpu_offload: bool = False, *
         base_layer = self.get_base_layer()
         if isinstance(base_layer, nn.Linear):
             in_features, out_features = base_layer.in_features, base_layer.out_features
+        elif isinstance(base_layer, nn.Conv1d):
+            in_features, out_features = base_layer.in_channels, base_layer.out_channels
         elif isinstance(base_layer, nn.Conv2d):
             in_features, out_features = base_layer.in_channels, base_layer.out_channels
         elif isinstance(base_layer, nn.Conv3d):
@@ -1297,6 +1299,18 @@ def _get_dora_layer_class(self):
         return DoraConv2dLayer
 
 
+class Conv1d(_ConvNd):
+    # Lora implemented in a conv1d layer
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        if not self._kernel_dim == 3:
+            raise ValueError(f"Conv1d layer kernel must have 3 dimensions, not {self._kernel_dim}")
+        self.conv_fn = F.conv1d
+
+    def _get_dora_layer_class(self):
+        raise NotImplementedError
+
+
 class Conv3d(_ConvNd):
     # Lora implemented in a conv3d layer
     def __init__(self, *args, **kwargs):
@@ -1679,6 +1693,9 @@ def dispatch_default(
     elif isinstance(target_base_layer, torch.nn.Conv3d):
         kwargs.update(lora_config.loftq_config)
         new_module = Conv3d(target, adapter_name, **kwargs)
+    elif isinstance(target_base_layer, nn.Conv1d):
+        kwargs.update(lora_config.loftq_config)
+        new_module = Conv1d(target, adapter_name, **kwargs)
     elif isinstance(target_base_layer, torch.nn.MultiheadAttention):
         kwargs.update(lora_config.loftq_config)
         new_module = MultiheadAttention(target, adapter_name, **kwargs)
diff --git a/src/peft/tuners/lora/model.py b/src/peft/tuners/lora/model.py
index a2e91c993e..a7f0d7d7b9 100644
--- a/src/peft/tuners/lora/model.py
+++ b/src/peft/tuners/lora/model.py
@@ -352,7 +352,7 @@ def dynamic_dispatch_func(target, adapter_name, lora_config, **kwargs):
             # no module could be matched
             raise ValueError(
                 f"Target module {target} is not supported. Currently, only the following modules are supported: "
-                "`torch.nn.Linear`, `torch.nn.Embedding`, `torch.nn.Conv2d`, `torch.nn.Conv3d`, "
+                "`torch.nn.Linear`, `torch.nn.Embedding`, `torch.nn.Conv1d`, `torch.nn.Conv2d`, `torch.nn.Conv3d`, "
                 "`transformers.pytorch_utils.Conv1D`, `torch.nn.MultiheadAttention.`."
             )
 
diff --git a/tests/test_custom_models.py b/tests/test_custom_models.py
index 22472e445c..a881207a43 100644
--- a/tests/test_custom_models.py
+++ b/tests/test_custom_models.py
@@ -103,6 +103,7 @@
         LoraConfig,
         {"target_modules": ["emb", "conv1d"], "use_dora": True},
     ),
+    ("Conv1d LoRA", "Conv1d", LoraConfig, {"target_modules": ["conv1d"]}),
     ("Conv2d 1 LoRA", "Conv2d", LoraConfig, {"target_modules": ["conv2d"]}),
     ("Conv2d 2 LoRA", "Conv2d", LoraConfig, {"target_modules": ["conv2d", "lin0"]}),
     ("Conv2d 1 LoRA with DoRA", "Conv2d", LoraConfig, {"target_modules": ["conv2d"], "use_dora": True}),
@@ -810,6 +811,25 @@ def get_output_embeddings(self):
         return None
 
 
+class ModelConv1D(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1d = nn.Conv1d(1, 1, 2)
+        self.relu = nn.ReLU()
+        self.flat = nn.Flatten()
+        self.lin0 = nn.Linear(9, 2)
+        self.sm = nn.LogSoftmax(dim=-1)
+
+    def forward(self, X):
+        X = X.float().reshape(-1, 1, 10)
+        X = self.conv1d(X)
+        X = self.relu(X)
+        X = self.flat(X)
+        X = self.lin0(X)
+        X = self.sm(X)
+        return X
+
+
 class ModelConv2D(nn.Module):
     def __init__(self):
         super().__init__()
@@ -910,6 +930,9 @@ def from_pretrained(cls, model_id, torch_dtype=None):
         if model_id == "EmbConv1D":
             return ModelEmbConv1D().to(torch_dtype)
 
+        if model_id == "Conv1d":
+            return ModelConv1D().to(torch_dtype)
+
         if model_id == "Conv2d":
             return ModelConv2D().to(torch_dtype)
 
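The following is a minimal usage sketch (not part of the diff) showing the path these changes enable: with the new `nn.Conv1d` branch in `LoraLayer.__init__` and the `Conv1d` dispatch in `dispatch_default`, a plain `torch.nn.Conv1d` can be listed in `target_modules`. The toy module `ToyConv1dModel` below is hypothetical and only mirrors the `ModelConv1D` test fixture added in `tests/test_custom_models.py`; `LoraConfig`, `get_peft_model`, and `print_trainable_parameters` are existing PEFT APIs.

```python
# Sketch only: exercises the new nn.Conv1d LoRA dispatch path from this diff.
import torch
import torch.nn as nn

from peft import LoraConfig, get_peft_model


class ToyConv1dModel(nn.Module):
    # Hypothetical model, shaped like the ModelConv1D test fixture in the diff.
    def __init__(self):
        super().__init__()
        self.conv1d = nn.Conv1d(1, 1, 2)  # (in_channels, out_channels, kernel_size)
        self.lin0 = nn.Linear(9, 2)

    def forward(self, x):
        x = torch.relu(self.conv1d(x))    # (batch, 1, 10) -> (batch, 1, 9)
        return self.lin0(x.flatten(1))    # (batch, 9) -> (batch, 2)


model = ToyConv1dModel()
# Before this change, targeting an nn.Conv1d raised the "Target module ... is not
# supported" ValueError updated in src/peft/tuners/lora/model.py above.
config = LoraConfig(r=8, lora_alpha=16, target_modules=["conv1d"])
peft_model = get_peft_model(model, config)

out = peft_model(torch.randn(4, 1, 10))
print(out.shape)                         # torch.Size([4, 2])
peft_model.print_trainable_parameters()  # only the LoRA A/B parameters are trainable
```

Note that `use_dora=True` is not exercised here: per the diff, `Conv1d._get_dora_layer_class` raises `NotImplementedError`, so DoRA on `nn.Conv1d` layers is not yet supported.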