diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 5731c0204f..25c7036bd8 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -517,7 +517,7 @@ }, "numProcPerNode": { "description": "Number of processes per node. This value is inserted into the `--nproc-per-node` argument of the `torchrun` CLI. Supported values: `auto`, `cpu`, `gpu`, or int value. Defaults to `auto`.", - "type": "string" + "$ref": "#/definitions/k8s.io.apimachinery.pkg.util.intstr.IntOrString" } } }, @@ -716,7 +716,7 @@ }, "numProcPerNode": { "description": "Number of processes/workers/slots on every training node. For the Torch runtime: `auto`, `cpu`, `gpu`, or int value can be set. For the MPI runtime only int value can be set.", - "type": "string" + "$ref": "#/definitions/k8s.io.apimachinery.pkg.util.intstr.IntOrString" }, "resourcesPerNode": { "description": "Compute resources for each training node.", diff --git a/manifests/base/crds/trainer.kubeflow.org_clustertrainingruntimes.yaml b/manifests/base/crds/trainer.kubeflow.org_clustertrainingruntimes.yaml index 2765763a36..a57a33e00f 100644 --- a/manifests/base/crds/trainer.kubeflow.org_clustertrainingruntimes.yaml +++ b/manifests/base/crds/trainer.kubeflow.org_clustertrainingruntimes.yaml @@ -587,17 +587,20 @@ spec: type: integer type: object numProcPerNode: + anyOf: + - type: integer + - type: string default: auto description: |- Number of processes per node. This value is inserted into the `--nproc-per-node` argument of the `torchrun` CLI. Supported values: `auto`, `cpu`, `gpu`, or int value. Defaults to `auto`. - type: string + x-kubernetes-int-or-string: true x-kubernetes-validations: - message: NumProcPerNode must be equal to auto, cpu, gpu, or int value - rule: self in ['auto', 'cpu', 'gpu'] || type(self) == int + rule: self > 0 || self in ['auto', 'cpu', 'gpu'] type: object type: object x-kubernetes-validations: diff --git a/manifests/base/crds/trainer.kubeflow.org_trainingruntimes.yaml b/manifests/base/crds/trainer.kubeflow.org_trainingruntimes.yaml index 0800575e0a..c7725e57a8 100644 --- a/manifests/base/crds/trainer.kubeflow.org_trainingruntimes.yaml +++ b/manifests/base/crds/trainer.kubeflow.org_trainingruntimes.yaml @@ -587,17 +587,20 @@ spec: type: integer type: object numProcPerNode: + anyOf: + - type: integer + - type: string default: auto description: |- Number of processes per node. This value is inserted into the `--nproc-per-node` argument of the `torchrun` CLI. Supported values: `auto`, `cpu`, `gpu`, or int value. Defaults to `auto`. - type: string + x-kubernetes-int-or-string: true x-kubernetes-validations: - message: NumProcPerNode must be equal to auto, cpu, gpu, or int value - rule: self in ['auto', 'cpu', 'gpu'] || type(self) == int + rule: self > 0 || self in ['auto', 'cpu', 'gpu'] type: object type: object x-kubernetes-validations: diff --git a/manifests/base/crds/trainer.kubeflow.org_trainjobs.yaml b/manifests/base/crds/trainer.kubeflow.org_trainjobs.yaml index 5c4c7cb7f4..16a2294cb9 100644 --- a/manifests/base/crds/trainer.kubeflow.org_trainjobs.yaml +++ b/manifests/base/crds/trainer.kubeflow.org_trainjobs.yaml @@ -3138,11 +3138,14 @@ spec: format: int32 type: integer numProcPerNode: + anyOf: + - type: integer + - type: string description: |- Number of processes/workers/slots on every training node. For the Torch runtime: `auto`, `cpu`, `gpu`, or int value can be set. For the MPI runtime only int value can be set. 
- type: string + x-kubernetes-int-or-string: true resourcesPerNode: description: Compute resources for each training node. properties: diff --git a/pkg/apis/trainer/v1alpha1/trainingruntime_types.go b/pkg/apis/trainer/v1alpha1/trainingruntime_types.go index 6d55280348..6aec8be8d2 100644 --- a/pkg/apis/trainer/v1alpha1/trainingruntime_types.go +++ b/pkg/apis/trainer/v1alpha1/trainingruntime_types.go @@ -19,6 +19,7 @@ package v1alpha1 import ( autoscalingv2 "k8s.io/api/autoscaling/v2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" jobsetv1alpha2 "sigs.k8s.io/jobset/api/jobset/v1alpha2" ) @@ -174,11 +175,10 @@ type TorchMLPolicySource struct { // Number of processes per node. // This value is inserted into the `--nproc-per-node` argument of the `torchrun` CLI. // Supported values: `auto`, `cpu`, `gpu`, or int value. - // TODO (andreyvelich): Add kubebuilder validation. // Defaults to `auto`. // +kubebuilder:default="auto" - // +kubebuilder:validation:XValidation:rule="self in ['auto', 'cpu', 'gpu'] || type(self) == int", message="NumProcPerNode must be equal to auto, cpu, gpu, or int value" - NumProcPerNode *string `json:"numProcPerNode,omitempty"` + // +kubebuilder:validation:XValidation:rule="self > 0 || self in ['auto', 'cpu', 'gpu']", message="NumProcPerNode must be equal to auto, cpu, gpu, or int value" + NumProcPerNode *intstr.IntOrString `json:"numProcPerNode,omitempty"` // Elastic policy for the PyTorch training. ElasticPolicy *TorchElasticPolicy `json:"elasticPolicy,omitempty"` diff --git a/pkg/apis/trainer/v1alpha1/trainjob_types.go b/pkg/apis/trainer/v1alpha1/trainjob_types.go index 03ef0816ad..0ea6ddbbf4 100644 --- a/pkg/apis/trainer/v1alpha1/trainjob_types.go +++ b/pkg/apis/trainer/v1alpha1/trainjob_types.go @@ -19,6 +19,7 @@ package v1alpha1 import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" ) const ( @@ -194,7 +195,7 @@ type Trainer struct { // Number of processes/workers/slots on every training node. // For the Torch runtime: `auto`, `cpu`, `gpu`, or int value can be set. // For the MPI runtime only int value can be set. - NumProcPerNode *string `json:"numProcPerNode,omitempty"` + NumProcPerNode *intstr.IntOrString `json:"numProcPerNode,omitempty"` } // DatasetConfig represents the desired dataset configuration. diff --git a/pkg/apis/trainer/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/trainer/v1alpha1/zz_generated.deepcopy.go index 3548d01a18..40a9d91af6 100644 --- a/pkg/apis/trainer/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/trainer/v1alpha1/zz_generated.deepcopy.go @@ -24,6 +24,7 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" + intstr "k8s.io/apimachinery/pkg/util/intstr" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
@@ -566,7 +567,7 @@ func (in *TorchMLPolicySource) DeepCopyInto(out *TorchMLPolicySource) { *out = *in if in.NumProcPerNode != nil { in, out := &in.NumProcPerNode, &out.NumProcPerNode - *out = new(string) + *out = new(intstr.IntOrString) **out = **in } if in.ElasticPolicy != nil { @@ -776,7 +777,7 @@ func (in *Trainer) DeepCopyInto(out *Trainer) { } if in.NumProcPerNode != nil { in, out := &in.NumProcPerNode, &out.NumProcPerNode - *out = new(string) + *out = new(intstr.IntOrString) **out = **in } return diff --git a/pkg/apis/trainer/v1alpha1/zz_generated.openapi.go b/pkg/apis/trainer/v1alpha1/zz_generated.openapi.go index b8d53431b8..c0ee8376c7 100644 --- a/pkg/apis/trainer/v1alpha1/zz_generated.openapi.go +++ b/pkg/apis/trainer/v1alpha1/zz_generated.openapi.go @@ -974,8 +974,7 @@ func schema_pkg_apis_trainer_v1alpha1_TorchMLPolicySource(ref common.ReferenceCa "numProcPerNode": { SchemaProps: spec.SchemaProps{ Description: "Number of processes per node. This value is inserted into the `--nproc-per-node` argument of the `torchrun` CLI. Supported values: `auto`, `cpu`, `gpu`, or int value. Defaults to `auto`.", - Type: []string{"string"}, - Format: "", + Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), }, }, "elasticPolicy": { @@ -988,7 +987,7 @@ func schema_pkg_apis_trainer_v1alpha1_TorchMLPolicySource(ref common.ReferenceCa }, }, Dependencies: []string{ - "github.com/kubeflow/trainer/pkg/apis/trainer/v1alpha1.TorchElasticPolicy"}, + "github.com/kubeflow/trainer/pkg/apis/trainer/v1alpha1.TorchElasticPolicy", "k8s.io/apimachinery/pkg/util/intstr.IntOrString"}, } } @@ -1352,15 +1351,14 @@ func schema_pkg_apis_trainer_v1alpha1_Trainer(ref common.ReferenceCallback) comm "numProcPerNode": { SchemaProps: spec.SchemaProps{ Description: "Number of processes/workers/slots on every training node. For the Torch runtime: `auto`, `cpu`, `gpu`, or int value can be set. For the MPI runtime only int value can be set.", - Type: []string{"string"}, - Format: "", + Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), }, }, }, }, }, Dependencies: []string{ - "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.ResourceRequirements"}, + "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/apimachinery/pkg/util/intstr.IntOrString"}, } } diff --git a/pkg/client/applyconfiguration/trainer/v1alpha1/torchmlpolicysource.go b/pkg/client/applyconfiguration/trainer/v1alpha1/torchmlpolicysource.go index c9d14b1ec1..6c0bae78dc 100644 --- a/pkg/client/applyconfiguration/trainer/v1alpha1/torchmlpolicysource.go +++ b/pkg/client/applyconfiguration/trainer/v1alpha1/torchmlpolicysource.go @@ -16,10 +16,14 @@ package v1alpha1 +import ( + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + // TorchMLPolicySourceApplyConfiguration represents a declarative configuration of the TorchMLPolicySource type for use // with apply. type TorchMLPolicySourceApplyConfiguration struct { - NumProcPerNode *string `json:"numProcPerNode,omitempty"` + NumProcPerNode *intstr.IntOrString `json:"numProcPerNode,omitempty"` ElasticPolicy *TorchElasticPolicyApplyConfiguration `json:"elasticPolicy,omitempty"` } @@ -32,7 +36,7 @@ func TorchMLPolicySource() *TorchMLPolicySourceApplyConfiguration { // WithNumProcPerNode sets the NumProcPerNode field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the NumProcPerNode field is set to the value of the last call. 
-func (b *TorchMLPolicySourceApplyConfiguration) WithNumProcPerNode(value string) *TorchMLPolicySourceApplyConfiguration { +func (b *TorchMLPolicySourceApplyConfiguration) WithNumProcPerNode(value intstr.IntOrString) *TorchMLPolicySourceApplyConfiguration { b.NumProcPerNode = &value return b } diff --git a/pkg/client/applyconfiguration/trainer/v1alpha1/trainer.go b/pkg/client/applyconfiguration/trainer/v1alpha1/trainer.go index d9991106ab..66e1aed89d 100644 --- a/pkg/client/applyconfiguration/trainer/v1alpha1/trainer.go +++ b/pkg/client/applyconfiguration/trainer/v1alpha1/trainer.go @@ -17,6 +17,7 @@ package v1alpha1 import ( + intstr "k8s.io/apimachinery/pkg/util/intstr" v1 "k8s.io/client-go/applyconfigurations/core/v1" ) @@ -29,7 +30,7 @@ type TrainerApplyConfiguration struct { Env []v1.EnvVarApplyConfiguration `json:"env,omitempty"` NumNodes *int32 `json:"numNodes,omitempty"` ResourcesPerNode *v1.ResourceRequirementsApplyConfiguration `json:"resourcesPerNode,omitempty"` - NumProcPerNode *string `json:"numProcPerNode,omitempty"` + NumProcPerNode *intstr.IntOrString `json:"numProcPerNode,omitempty"` } // TrainerApplyConfiguration constructs a declarative configuration of the Trainer type for use with @@ -98,7 +99,7 @@ func (b *TrainerApplyConfiguration) WithResourcesPerNode(value *v1.ResourceRequi // WithNumProcPerNode sets the NumProcPerNode field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the NumProcPerNode field is set to the value of the last call. -func (b *TrainerApplyConfiguration) WithNumProcPerNode(value string) *TrainerApplyConfiguration { +func (b *TrainerApplyConfiguration) WithNumProcPerNode(value intstr.IntOrString) *TrainerApplyConfiguration { b.NumProcPerNode = &value return b } diff --git a/pkg/runtime/core/trainingruntime_test.go b/pkg/runtime/core/trainingruntime_test.go index 4274136a53..0c04ff8b64 100644 --- a/pkg/runtime/core/trainingruntime_test.go +++ b/pkg/runtime/core/trainingruntime_test.go @@ -19,7 +19,6 @@ package core import ( "context" "fmt" - "k8s.io/utils/ptr" "testing" "github.com/google/go-cmp/cmp" @@ -27,6 +26,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" "sigs.k8s.io/controller-runtime/pkg/client" schedulerpluginsv1alpha1 "sigs.k8s.io/scheduler-plugins/apis/scheduling/v1alpha1" @@ -264,7 +264,7 @@ func TestTrainingRuntimeNewObjects(t *testing.T) { "succeeded to build JobSet with Torch values from the TrainJob": { trainingRuntime: testingutil.MakeTrainingRuntimeWrapper(metav1.NamespaceDefault, "test-runtime").RuntimeSpec( testingutil.MakeTrainingRuntimeSpecWrapper(testingutil.MakeTrainingRuntimeWrapper(metav1.NamespaceDefault, "test-runtime").Spec). - TorchPolicy(100, ptr.To("auto")). + TorchPolicy(100, intstr.FromString("auto")). ContainerTrainer("test:runtime", []string{"runtime"}, []string{"runtime"}, resRequests). Obj(), ).Obj(), @@ -274,7 +274,7 @@ func TestTrainingRuntimeNewObjects(t *testing.T) { Trainer( testingutil.MakeTrainJobTrainerWrapper(). NumNodes(30). - NumProcPerNode(ptr.To("3")). + NumProcPerNode(intstr.FromInt32(3)). Obj(), ). 
Obj(), @@ -318,7 +318,7 @@ func TestTrainingRuntimeNewObjects(t *testing.T) { "succeeded to build JobSet with Torch values from the Runtime and envs.": { trainingRuntime: testingutil.MakeTrainingRuntimeWrapper(metav1.NamespaceDefault, "test-runtime").RuntimeSpec( testingutil.MakeTrainingRuntimeSpecWrapper(testingutil.MakeTrainingRuntimeWrapper(metav1.NamespaceDefault, "test-runtime").Spec). - TorchPolicy(100, ptr.To("auto")). + TorchPolicy(100, intstr.FromString("auto")). ContainerTrainer("test:runtime", []string{"runtime"}, []string{"runtime"}, resRequests). ContainerTrainerEnv( []corev1.EnvVar{ diff --git a/pkg/runtime/framework/plugins/mpi/mpi.go b/pkg/runtime/framework/plugins/mpi/mpi.go index 5d02679d05..d26c58c7c4 100644 --- a/pkg/runtime/framework/plugins/mpi/mpi.go +++ b/pkg/runtime/framework/plugins/mpi/mpi.go @@ -94,7 +94,7 @@ func (m *MPI) EnforceMLPolicy(info *runtime.Info, trainJob *trainer.TrainJob) er numProcPerNode := strconv.Itoa(int(*info.RuntimePolicy.MLPolicy.MPI.NumProcPerNode)) if trainJob.Spec.Trainer != nil && trainJob.Spec.Trainer.NumProcPerNode != nil { - numProcPerNode = *trainJob.Spec.Trainer.NumProcPerNode + numProcPerNode = (*trainJob.Spec.Trainer.NumProcPerNode).String() } info.Trainer.NumProcPerNode = numProcPerNode diff --git a/pkg/runtime/framework/plugins/torch/torch.go b/pkg/runtime/framework/plugins/torch/torch.go index 02e86073c4..af9a04c456 100644 --- a/pkg/runtime/framework/plugins/torch/torch.go +++ b/pkg/runtime/framework/plugins/torch/torch.go @@ -21,6 +21,7 @@ import ( "fmt" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/utils/ptr" @@ -66,9 +67,9 @@ func (t *Torch) EnforceMLPolicy(info *runtime.Info, trainJob *trainer.TrainJob) } info.Trainer.NumNodes = numNodes - numProcPerNode := info.RuntimePolicy.MLPolicy.Torch.NumProcPerNode + numProcPerNode := ptr.Deref(info.RuntimePolicy.MLPolicy.Torch.NumProcPerNode, intstr.FromString("auto")) if trainJob.Spec.Trainer != nil && trainJob.Spec.Trainer.NumProcPerNode != nil { - numProcPerNode = trainJob.Spec.Trainer.NumProcPerNode + numProcPerNode = ptr.Deref(trainJob.Spec.Trainer.NumProcPerNode, intstr.FromString("auto")) } // Update envs for Info object. 
@@ -84,7 +85,7 @@ func (t *Torch) EnforceMLPolicy(info *runtime.Info, trainJob *trainer.TrainJob) }, { Name: constants.TorchEnvNumProcPerNode, - Value: ptr.Deref(numProcPerNode, "auto"), + Value: numProcPerNode.String(), }, { Name: constants.TorchEnvNodeRank, diff --git a/pkg/util/testing/wrapper.go b/pkg/util/testing/wrapper.go index 23b38545cd..f2d735b12f 100644 --- a/pkg/util/testing/wrapper.go +++ b/pkg/util/testing/wrapper.go @@ -22,6 +22,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/utils/ptr" jobsetv1alpha2 "sigs.k8s.io/jobset/api/jobset/v1alpha2" schedulerpluginsv1alpha1 "sigs.k8s.io/scheduler-plugins/apis/scheduling/v1alpha1" @@ -392,8 +393,8 @@ func (t *TrainJobTrainerWrapper) NumNodes(numNodes int32) *TrainJobTrainerWrappe return t } -func (t *TrainJobTrainerWrapper) NumProcPerNode(numProcPerNode *string) *TrainJobTrainerWrapper { - t.Trainer.NumProcPerNode = numProcPerNode +func (t *TrainJobTrainerWrapper) NumProcPerNode(numProcPerNode intstr.IntOrString) *TrainJobTrainerWrapper { + t.Trainer.NumProcPerNode = &numProcPerNode return t } @@ -689,12 +690,12 @@ func (s *TrainingRuntimeSpecWrapper) NumNodes(numNodes int32) *TrainingRuntimeSp return s } -func (s *TrainingRuntimeSpecWrapper) TorchPolicy(numNodes int32, numProcPerNode *string) *TrainingRuntimeSpecWrapper { +func (s *TrainingRuntimeSpecWrapper) TorchPolicy(numNodes int32, numProcPerNode intstr.IntOrString) *TrainingRuntimeSpecWrapper { s.MLPolicy = &trainer.MLPolicy{ NumNodes: &numNodes, MLPolicySource: trainer.MLPolicySource{ Torch: &trainer.TorchMLPolicySource{ - NumProcPerNode: numProcPerNode, + NumProcPerNode: &numProcPerNode, }, }, } diff --git a/sdk/docs/TrainerV1alpha1TorchMLPolicySource.md b/sdk/docs/TrainerV1alpha1TorchMLPolicySource.md index 6dcb288e30..e1cf9b351f 100644 --- a/sdk/docs/TrainerV1alpha1TorchMLPolicySource.md +++ b/sdk/docs/TrainerV1alpha1TorchMLPolicySource.md @@ -5,7 +5,7 @@ TorchMLPolicySource represents a PyTorch runtime configuration. Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- **elastic_policy** | [**TrainerV1alpha1TorchElasticPolicy**](TrainerV1alpha1TorchElasticPolicy.md) | | [optional] -**num_proc_per_node** | **str** | Number of processes per node. This value is inserted into the `--nproc-per-node` argument of the `torchrun` CLI. Supported values: `auto`, `cpu`, `gpu`, or int value. Defaults to `auto`. | [optional] +**num_proc_per_node** | [**K8sIoApimachineryPkgUtilIntstrIntOrString**](K8sIoApimachineryPkgUtilIntstrIntOrString.md) | | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/sdk/docs/TrainerV1alpha1Trainer.md b/sdk/docs/TrainerV1alpha1Trainer.md index ac3623f559..6af31458ad 100644 --- a/sdk/docs/TrainerV1alpha1Trainer.md +++ b/sdk/docs/TrainerV1alpha1Trainer.md @@ -9,7 +9,7 @@ Name | Type | Description | Notes **env** | [**list[V1EnvVar]**](V1EnvVar.md) | List of environment variables to set in the training container. These values will be merged with the TrainingRuntime's trainer environments. | [optional] **image** | **str** | Docker image for the training container. | [optional] **num_nodes** | **int** | Number of training nodes. | [optional] -**num_proc_per_node** | **str** | Number of processes/workers/slots on every training node. 
For the Torch runtime: `auto`, `cpu`, `gpu`, or int value can be set. For the MPI runtime only int value can be set. | [optional] +**num_proc_per_node** | [**K8sIoApimachineryPkgUtilIntstrIntOrString**](K8sIoApimachineryPkgUtilIntstrIntOrString.md) | | [optional] **resources_per_node** | [**V1ResourceRequirements**](V1ResourceRequirements.md) | | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/sdk/kubeflow/trainer/models/trainer_v1alpha1_torch_ml_policy_source.py b/sdk/kubeflow/trainer/models/trainer_v1alpha1_torch_ml_policy_source.py index be0bcca026..6a4c6c6022 100644 --- a/sdk/kubeflow/trainer/models/trainer_v1alpha1_torch_ml_policy_source.py +++ b/sdk/kubeflow/trainer/models/trainer_v1alpha1_torch_ml_policy_source.py @@ -34,7 +34,7 @@ class TrainerV1alpha1TorchMLPolicySource(object): """ openapi_types = { 'elastic_policy': 'TrainerV1alpha1TorchElasticPolicy', - 'num_proc_per_node': 'str' + 'num_proc_per_node': 'K8sIoApimachineryPkgUtilIntstrIntOrString' } attribute_map = { @@ -82,10 +82,9 @@ def elastic_policy(self, elastic_policy): def num_proc_per_node(self): """Gets the num_proc_per_node of this TrainerV1alpha1TorchMLPolicySource. # noqa: E501 - Number of processes per node. This value is inserted into the `--nproc-per-node` argument of the `torchrun` CLI. Supported values: `auto`, `cpu`, `gpu`, or int value. Defaults to `auto`. # noqa: E501 :return: The num_proc_per_node of this TrainerV1alpha1TorchMLPolicySource. # noqa: E501 - :rtype: str + :rtype: K8sIoApimachineryPkgUtilIntstrIntOrString """ return self._num_proc_per_node @@ -93,10 +92,9 @@ def num_proc_per_node(self): def num_proc_per_node(self, num_proc_per_node): """Sets the num_proc_per_node of this TrainerV1alpha1TorchMLPolicySource. - Number of processes per node. This value is inserted into the `--nproc-per-node` argument of the `torchrun` CLI. Supported values: `auto`, `cpu`, `gpu`, or int value. Defaults to `auto`. # noqa: E501 :param num_proc_per_node: The num_proc_per_node of this TrainerV1alpha1TorchMLPolicySource. # noqa: E501 - :type: str + :type: K8sIoApimachineryPkgUtilIntstrIntOrString """ self._num_proc_per_node = num_proc_per_node diff --git a/sdk/kubeflow/trainer/models/trainer_v1alpha1_trainer.py b/sdk/kubeflow/trainer/models/trainer_v1alpha1_trainer.py index 18ac69b23a..57e857b971 100644 --- a/sdk/kubeflow/trainer/models/trainer_v1alpha1_trainer.py +++ b/sdk/kubeflow/trainer/models/trainer_v1alpha1_trainer.py @@ -38,7 +38,7 @@ class TrainerV1alpha1Trainer(object): 'env': 'list[V1EnvVar]', 'image': 'str', 'num_nodes': 'int', - 'num_proc_per_node': 'str', + 'num_proc_per_node': 'K8sIoApimachineryPkgUtilIntstrIntOrString', 'resources_per_node': 'V1ResourceRequirements' } @@ -201,10 +201,9 @@ def num_nodes(self, num_nodes): def num_proc_per_node(self): """Gets the num_proc_per_node of this TrainerV1alpha1Trainer. # noqa: E501 - Number of processes/workers/slots on every training node. For the Torch runtime: `auto`, `cpu`, `gpu`, or int value can be set. For the MPI runtime only int value can be set. # noqa: E501 :return: The num_proc_per_node of this TrainerV1alpha1Trainer. # noqa: E501 - :rtype: str + :rtype: K8sIoApimachineryPkgUtilIntstrIntOrString """ return self._num_proc_per_node @@ -212,10 +211,9 @@ def num_proc_per_node(self): def num_proc_per_node(self, num_proc_per_node): """Sets the num_proc_per_node of this TrainerV1alpha1Trainer. 
- Number of processes/workers/slots on every training node. For the Torch runtime: `auto`, `cpu`, `gpu`, or int value can be set. For the MPI runtime only int value can be set. # noqa: E501 :param num_proc_per_node: The num_proc_per_node of this TrainerV1alpha1Trainer. # noqa: E501 - :type: str + :type: K8sIoApimachineryPkgUtilIntstrIntOrString """ self._num_proc_per_node = num_proc_per_node diff --git a/test/integration/controller/trainjob_controller_test.go b/test/integration/controller/trainjob_controller_test.go index 88b1fb6688..9378554788 100644 --- a/test/integration/controller/trainjob_controller_test.go +++ b/test/integration/controller/trainjob_controller_test.go @@ -26,6 +26,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" jobsetv1alpha2 "sigs.k8s.io/jobset/api/jobset/v1alpha2" @@ -278,7 +279,7 @@ var _ = ginkgo.Describe("TrainJob controller", ginkgo.Ordered, func() { trainingRuntime = testingutil.MakeTrainingRuntimeWrapper(ns.Name, "alpha"). RuntimeSpec( testingutil.MakeTrainingRuntimeSpecWrapper(testingutil.MakeTrainingRuntimeWrapper(metav1.NamespaceDefault, "alpha").Spec). - TorchPolicy(100, ptr.To("auto")). + TorchPolicy(100, intstr.FromString("auto")). ContainerTrainer("test:runtime", []string{"runtime"}, []string{"runtime"}, resRequests). Obj()). Obj() diff --git a/test/integration/webhooks/trainingruntime_webhook_test.go b/test/integration/webhooks/trainingruntime_webhook_test.go index bffb37507d..839293a3d9 100644 --- a/test/integration/webhooks/trainingruntime_webhook_test.go +++ b/test/integration/webhooks/trainingruntime_webhook_test.go @@ -21,6 +21,7 @@ import ( "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" @@ -164,7 +165,7 @@ var _ = ginkgo.Describe("TrainingRuntime marker validations and defaulting", gin runtime.Spec.MLPolicy = &trainer.MLPolicy{ MLPolicySource: trainer.MLPolicySource{ Torch: &trainer.TorchMLPolicySource{ - NumProcPerNode: ptr.To("auto"), + NumProcPerNode: ptr.To(intstr.FromString("auto")), }, }, }
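
The net effect of this change is that `numProcPerNode` accepts either an integer or a string on both the `TrainJob` trainer and the Torch runtime policy: the CRDs now carry `x-kubernetes-int-or-string: true`, and the CEL rule admits positive integers (`self > 0`) or one of the strings `auto`, `cpu`, `gpu`. As a rough sketch of how a Go client might set the field after this change — the import path and the `Trainer`/`TorchMLPolicySource` types are taken from the diff above, while the surrounding program is assumed purely for illustration:

```go
// Hypothetical example, not part of the diff: populating the new
// *intstr.IntOrString field in both its string and integer forms.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/utils/ptr"

	trainer "github.com/kubeflow/trainer/pkg/apis/trainer/v1alpha1"
)

func main() {
	// String form: forwarded to the `--nproc-per-node` argument of torchrun.
	torchPolicy := trainer.TorchMLPolicySource{
		NumProcPerNode: ptr.To(intstr.FromString("auto")),
	}

	// Integer form: the only shape the MPI runtime accepts.
	jobTrainer := trainer.Trainer{
		NumProcPerNode: ptr.To(intstr.FromInt32(8)),
	}

	// Both plugins normalize the value with IntOrString.String(),
	// mirroring the mpi.go and torch.go changes above.
	fmt.Println(torchPolicy.NumProcPerNode.String()) // auto
	fmt.Println(jobTrainer.NumProcPerNode.String())  // 8
}
```

Whichever form the user chooses, the MPI and Torch plugins reduce it back to a plain string via `String()` (see the `mpi.go` and `torch.go` hunks), so the value rendered into the training runtime is unchanged from the previous `*string` behavior.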