Add option to force specifying pool name
In some cases it is desirable to make specifying at least one
pool in poolName or pools mandatory. To achieve this, we add a
new IPAM CNI parameter which can be defined in the nv-ipam config
file. This prevents the CNI from using the network name as the
pool name when poolName is not specified.

Signed-off-by: adrianc <[email protected]>
adrianchiris committed Sep 25, 2024
1 parent 6ce8885 commit 52b3daa
Showing 7 changed files with 163 additions and 59 deletions.
2 changes: 1 addition & 1 deletion Dockerfile
@@ -1,5 +1,5 @@
# Build the image
FROM golang:1.21 as builder
FROM golang:1.21 AS builder

WORKDIR /workspace
# Copy the Go Modules manifests
4 changes: 4 additions & 0 deletions README.md
@@ -559,6 +559,8 @@ Shim CNI Configuration flags:
shim CNI config: timeout for IPAM daemon calls (default 5)
--cni-daemon-socket string
shim CNI config: IPAM daemon socket path (default "unix:///var/lib/cni/nv-ipam/daemon.sock")
--cni-force-pool-name
shim CNI config: force specifying pool name in CNI configuration
--cni-log-file string
shim CNI config: path to log file for shim CNI (default "/var/log/nv-ipam-cni.log")
--cni-log-level string
@@ -579,6 +581,7 @@ nv-ipam accepts the following CNI configuration:
```json
{
"type": "nv-ipam",
"forcePoolName" : false,
"poolName": "pool1,pool2",
"poolType": "ippool",
"daemonSocket": "unix:///var/lib/cni/nv-ipam/daemon.sock",
Expand All @@ -590,6 +593,7 @@ nv-ipam accepts the following CNI configuration:
```

* `type` (string, required): CNI plugin name, MUST be `"nv-ipam"`
* `forcePoolName` (bool, optional): when `true`, a pool name must be set explicitly in the CNI configuration and the network name is not used as a fallback (see the sketch after this section). Defaults to `false`.
* `poolName` (string, optional): name of the Pool to be used for IP allocation.
It is possible to allocate two IPs for the interface from different pools by specifying pool names separated by a comma,
e.g. `"my-ipv4-pool,my-ipv6-pool"`. The primary intent of supporting multiple pools is the dual-stack use-case, when an
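For illustration, a minimal Go sketch of the `forcePoolName` behavior, mirroring the unit tests added in this commit; the temp-dir staging and the `CNI_ARGS` string are assumptions for the example, not part of this change:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/containernetworking/cni/pkg/skel"

	cniTypes "github.com/Mellanox/nvidia-k8s-ipam/pkg/cni/types"
)

func main() {
	// Stage an empty host config file so the loader does not depend on a
	// running nv-ipam daemon (sketch only).
	confDir, _ := os.MkdirTemp("", "nv-ipam")
	defer os.RemoveAll(confDir)
	_ = os.WriteFile(filepath.Join(confDir, cniTypes.ConfFileName), []byte(`{}`), 0o644)

	// forcePoolName is set but poolName is not: the loader refuses to fall
	// back to the network name ("my-net") and returns an error instead.
	conf := fmt.Sprintf(`{"name": "my-net", "ipam": {"confDir": %q, "forcePoolName": true}}`, confDir)
	// Illustrative CNI_ARGS; real invocations receive this from the runtime.
	args := "IgnoreUnknown=1;K8S_POD_NAMESPACE=default;K8S_POD_NAME=pod1;K8S_POD_UID=uid1"
	_, err := cniTypes.NewConfLoader().LoadConf(&skel.CmdArgs{
		StdinData: []byte(conf), Args: args,
	})
	fmt.Println(err) // expected: an error such as "no pool provided"
}
```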
6 changes: 3 additions & 3 deletions cmd/ipam-node/app/app.go
@@ -359,9 +359,9 @@ func createNVIPAMConfig(log logr.Logger, opts *options.Options) error {
"daemonSocket": "%s",
"daemonCallTimeoutSeconds": %d,
"logFile": "%s",
"logLevel": "%s"
}
`, opts.CNIDaemonSocket, opts.CNIDaemonCallTimeoutSeconds, opts.CNILogFile, opts.CNILogLevel)
"logLevel": "%s",
"forcePoolName": %v
}`, opts.CNIDaemonSocket, opts.CNIDaemonCallTimeoutSeconds, opts.CNILogFile, opts.CNILogLevel, opts.CNIForcePoolName)

err := renameio.WriteFile(filepath.Join(opts.CNIConfDir, cniTypes.ConfFileName), []byte(cfg), 0664)
if err != nil {
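For reference, a sketch of the file this renders under the default options; the socket, timeout, and log-file values match the flag defaults shown in the README above, while the `"info"` log level is an assumption:

```go
package main

import "fmt"

// Assumed rendering of nv-ipam.conf by createNVIPAMConfig with default CLI
// options; exact values depend on the flags passed to the daemon.
const exampleRenderedConf = `{
  "daemonSocket": "unix:///var/lib/cni/nv-ipam/daemon.sock",
  "daemonCallTimeoutSeconds": 5,
  "logFile": "/var/log/nv-ipam-cni.log",
  "logLevel": "info",
  "forcePoolName": false
}`

func main() { fmt.Println(exampleRenderedConf) }
```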
162 changes: 108 additions & 54 deletions cmd/ipam-node/app/app_test.go
@@ -14,8 +14,11 @@
package app_test

import (
"context"
"encoding/json"
"os"
"path/filepath"
"sync"
"time"

"github.com/go-logr/logr"
@@ -34,6 +37,7 @@ import (
ipamv1alpha1 "github.com/Mellanox/nvidia-k8s-ipam/api/v1alpha1"
"github.com/Mellanox/nvidia-k8s-ipam/cmd/ipam-node/app"
"github.com/Mellanox/nvidia-k8s-ipam/cmd/ipam-node/app/options"
"github.com/Mellanox/nvidia-k8s-ipam/pkg/cni/types"
)

const (
@@ -158,6 +162,7 @@ func getOptions(testDir string) *options.Options {
opts.CNIConfDir = cniConfDir
opts.CNIDaemonSocket = daemonSocket
opts.PoolsNamespace = testNamespace
opts.CNIForcePoolName = true
return opts
}

@@ -175,68 +180,117 @@ func getValidReqParams(uid, name, namespace string) *nodev1.IPAMParameters {
}
}

func pathExists(path string) error {
_, err := os.Stat(path)
return err
}

var _ = Describe("IPAM Node daemon", func() {
It("Validate main flows", func() {
done := make(chan interface{})
var (
wg sync.WaitGroup
testDir string
opts *options.Options
cFuncDaemon context.CancelFunc
daemonCtx context.Context
)

BeforeEach(func() {
wg = sync.WaitGroup{}
testDir = GinkgoT().TempDir()
opts = getOptions(testDir)

daemonCtx, cFuncDaemon = context.WithCancel(ctx)
wg.Add(1)
go func() {
defer wg.Done()
defer GinkgoRecover()
defer close(done)
testDir := GinkgoT().TempDir()
opts := getOptions(testDir)
Expect(app.RunNodeDaemon(logr.NewContext(daemonCtx, klog.NewKlogr()), cfg, opts)).NotTo(HaveOccurred())
}()
})

AfterEach(func() {
cFuncDaemon()
wg.Wait()
})

It("Validate main flows", func() {
createTestIPPools()
createTestCIDRPools()
pod := createTestPod()

createTestIPPools()
createTestCIDRPools()
pod := createTestPod()
conn, err := grpc.DialContext(ctx, opts.CNIDaemonSocket,
grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithBlock())
Expect(err).NotTo(HaveOccurred())

ctx = logr.NewContext(ctx, klog.NewKlogr())
grpcClient := nodev1.NewIPAMServiceClient(conn)

go func() {
Expect(app.RunNodeDaemon(ctx, cfg, opts)).NotTo(HaveOccurred())
}()
cidrPoolParams := getValidReqParams(string(pod.UID), pod.Name, pod.Namespace)
cidrPoolParams.PoolType = nodev1.PoolType_POOL_TYPE_CIDRPOOL
ipPoolParams := getValidReqParams(string(pod.UID), pod.Name, pod.Namespace)

conn, err := grpc.DialContext(ctx, opts.CNIDaemonSocket,
grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithBlock())
for _, params := range []*nodev1.IPAMParameters{ipPoolParams, cidrPoolParams} {
// no allocation yet
_, err = grpcClient.IsAllocated(ctx,
&nodev1.IsAllocatedRequest{Parameters: params})
Expect(status.Code(err) == codes.NotFound).To(BeTrue())

// allocate
resp, err := grpcClient.Allocate(ctx, &nodev1.AllocateRequest{Parameters: params})
Expect(err).NotTo(HaveOccurred())
Expect(resp.Allocations).To(HaveLen(2))
Expect(resp.Allocations[0].Pool).NotTo(BeEmpty())
Expect(resp.Allocations[0].Gateway).NotTo(BeEmpty())
Expect(resp.Allocations[0].Ip).NotTo(BeEmpty())

grpcClient := nodev1.NewIPAMServiceClient(conn)

cidrPoolParams := getValidReqParams(string(pod.UID), pod.Name, pod.Namespace)
cidrPoolParams.PoolType = nodev1.PoolType_POOL_TYPE_CIDRPOOL
ipPoolParams := getValidReqParams(string(pod.UID), pod.Name, pod.Namespace)

for _, params := range []*nodev1.IPAMParameters{ipPoolParams, cidrPoolParams} {
// no allocation yet
_, err = grpcClient.IsAllocated(ctx,
&nodev1.IsAllocatedRequest{Parameters: params})
Expect(status.Code(err) == codes.NotFound).To(BeTrue())

// allocate
resp, err := grpcClient.Allocate(ctx, &nodev1.AllocateRequest{Parameters: params})
Expect(err).NotTo(HaveOccurred())
Expect(resp.Allocations).To(HaveLen(2))
Expect(resp.Allocations[0].Pool).NotTo(BeEmpty())
Expect(resp.Allocations[0].Gateway).NotTo(BeEmpty())
Expect(resp.Allocations[0].Ip).NotTo(BeEmpty())

_, err = grpcClient.IsAllocated(ctx,
&nodev1.IsAllocatedRequest{Parameters: params})
Expect(err).NotTo(HaveOccurred())

// deallocate
_, err = grpcClient.Deallocate(ctx, &nodev1.DeallocateRequest{Parameters: params})
Expect(err).NotTo(HaveOccurred())

// deallocate should be idempotent
_, err = grpcClient.Deallocate(ctx, &nodev1.DeallocateRequest{Parameters: params})
Expect(err).NotTo(HaveOccurred())

// check should fail
_, err = grpcClient.IsAllocated(ctx,
&nodev1.IsAllocatedRequest{Parameters: params})
Expect(status.Code(err) == codes.NotFound).To(BeTrue())
}
}()
Eventually(done, 5*time.Minute).Should(BeClosed())
_, err = grpcClient.IsAllocated(ctx,
&nodev1.IsAllocatedRequest{Parameters: params})
Expect(err).NotTo(HaveOccurred())

// deallocate
_, err = grpcClient.Deallocate(ctx, &nodev1.DeallocateRequest{Parameters: params})
Expect(err).NotTo(HaveOccurred())

// deallocate should be idempotent
_, err = grpcClient.Deallocate(ctx, &nodev1.DeallocateRequest{Parameters: params})
Expect(err).NotTo(HaveOccurred())

// check should fail
_, err = grpcClient.IsAllocated(ctx,
&nodev1.IsAllocatedRequest{Parameters: params})
Expect(status.Code(err) == codes.NotFound).To(BeTrue())
}
})

It("deployShimCNI works as expected", func() {
// cni binary copied
Eventually(func() error {
return pathExists(filepath.Join(testDir, "cnibin", "nv-ipam"))
}).
WithTimeout(2 * time.Second).
ShouldNot(HaveOccurred())
// conf file copied
Eventually(func() error {
return pathExists(filepath.Join(testDir, "cniconf", "nv-ipam.conf"))
}).
WithTimeout(2 * time.Second).
ShouldNot(HaveOccurred())
// store dir created
Eventually(func() error {
return pathExists(filepath.Join(testDir, "store"))
}).
WithTimeout(2 * time.Second).
ShouldNot(HaveOccurred())
// conf file contains expected results
data, err := os.ReadFile(filepath.Join(testDir, "cniconf", "nv-ipam.conf"))
Expect(err).ToNot(HaveOccurred())
ipamConf := types.IPAMConf{}
Expect(json.Unmarshal(data, &ipamConf)).ToNot(HaveOccurred())
Expect(ipamConf.DaemonSocket).To(Equal(opts.CNIDaemonSocket))
Expect(ipamConf.DaemonCallTimeoutSeconds).To(Equal(opts.CNIDaemonCallTimeoutSeconds))
Expect(ipamConf.LogFile).To(Equal(opts.CNILogFile))
Expect(ipamConf.LogLevel).To(Equal(opts.CNILogLevel))
Expect(ipamConf.ForcePoolName).ToNot(BeNil())
Expect(*ipamConf.ForcePoolName).To(Equal(opts.CNIForcePoolName))
})
})
4 changes: 4 additions & 0 deletions cmd/ipam-node/app/options/options.go
@@ -53,6 +53,7 @@ func New() *Options {
CNIConfDir: cniTypes.DefaultConfDir,
CNILogLevel: cniTypes.DefaultLogLevel,
CNILogFile: cniTypes.DefaultLogFile,
CNIForcePoolName: false,
}
}

@@ -75,6 +76,7 @@ type Options struct {
CNIDaemonCallTimeoutSeconds int
CNILogFile string
CNILogLevel string
CNIForcePoolName bool
}

// AddNamedFlagSets register flags for common options in NamedFlagSets
@@ -119,6 +121,8 @@ func (o *Options) AddNamedFlagSets(sharedFS *cliflag.NamedFlagSets) {
"shim CNI config: path to log file for shim CNI")
cniFS.StringVar(&o.CNILogLevel, "cni-log-level", o.CNILogLevel,
"shim CNI config: log level for shim CNI")
cniFS.BoolVar(&o.CNIForcePoolName, "cni-force-pool-name", o.CNIForcePoolName,
"shim CNI config: force specifying pool name in CNI configuration")
}

// Validate registered options
19 changes: 18 additions & 1 deletion pkg/cni/types/types.go
@@ -23,6 +23,7 @@ import (

"github.com/containernetworking/cni/pkg/skel"
"github.com/containernetworking/cni/pkg/types"
"k8s.io/utils/ptr"

"github.com/Mellanox/nvidia-k8s-ipam/pkg/common"
)
@@ -54,6 +55,9 @@ type ConfLoader interface {
type IPAMConf struct {
types.IPAM

// ForcePoolName, if set, makes specifying PoolName in the CNI call mandatory;
// the pool name will not be derived from the network name.
ForcePoolName *bool `json:"forcePoolName,omitempty"`
// PoolName is the name of the pool to be used to allocate IP
PoolName string `json:"poolName,omitempty"`
// PoolType is the type of the pool which is referred by the PoolName,
@@ -150,7 +154,7 @@ func (cl *confLoader) LoadConf(args *skel.CmdArgs) (*NetConf, error) {
// overlay config with defaults
defaultConf := &IPAMConf{
// use network name as pool name by default
PoolName: n.Name,
ForcePoolName: ptr.To(false),
PoolType: common.PoolTypeIPPool,
ConfDir: DefaultConfDir,
LogFile: DefaultLogFile,
@@ -160,6 +164,11 @@
}
cl.overlayConf(defaultConf, n.IPAM)

if !*n.IPAM.ForcePoolName && n.IPAM.PoolName == "" {
// set poolName as network name
n.IPAM.PoolName = n.Name
}

// static IP address priority:
// stdin runtimeConfig["ips"] > stdin args["cni"]["ips"] > IP argument from CNI_ARGS env variable
requestedIPs := n.RuntimeConfig.IPs
@@ -194,6 +203,7 @@ func (cl *confLoader) LoadConf(args *skel.CmdArgs) (*NetConf, error) {
if err != nil {
return nil, err
}

n.IPAM.PoolType, err = getPoolType(n)
if err != nil {
return nil, err
@@ -243,6 +253,9 @@ func getPools(n *NetConf) ([]string, error) {
if n.Args != nil && len(n.Args.ArgsCNI.PoolNames) > 0 {
pools = n.Args.ArgsCNI.PoolNames
} else {
if n.IPAM.PoolName == "" {
return nil, fmt.Errorf("no pool provided")
}
pools = strings.Split(n.IPAM.PoolName, ",")
}
if len(pools) > 2 {
@@ -319,4 +332,8 @@ func (cl *confLoader) overlayConf(from, to *IPAMConf) {
if to.DaemonCallTimeoutSeconds == 0 {
to.DaemonCallTimeoutSeconds = from.DaemonCallTimeoutSeconds
}

if to.ForcePoolName == nil {
to.ForcePoolName = from.ForcePoolName
}
}
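The field is a `*bool` rather than a plain `bool` so the overlay can distinguish "unset" from an explicit `false`. A standalone sketch of that tri-state logic (an illustration of the pattern, not the actual implementation):

```go
package main

import (
	"fmt"

	"k8s.io/utils/ptr"
)

// overlayBool mirrors the ForcePoolName handling in overlayConf: only a nil
// (unset) value inherits the default; an explicit false is preserved.
func overlayBool(def, user *bool) *bool {
	if user == nil {
		return def
	}
	return user
}

func main() {
	def := ptr.To(false)
	fmt.Println(*overlayBool(def, nil))          // false: field omitted, default applies
	fmt.Println(*overlayBool(def, ptr.To(true))) // true: explicit user setting wins
}
```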
25 changes: 25 additions & 0 deletions pkg/cni/types/types_test.go
@@ -109,6 +109,31 @@ var _ = Describe("Types Tests", func() {
Expect(err).To(HaveOccurred())
})

It("Fails if CNI stdin data does not contain pool name and forcePoolName is set in config file", func() {
// write config file
confData := `{"forcePoolName": true}`
err := os.WriteFile(path.Join(testConfDir, cniTypes.ConfFileName), []byte(confData), 0o644)
Expect(err).ToNot(HaveOccurred())

// Load CNI config
testConf := fmt.Sprintf(`{"name": "my-net", "ipam": {"confDir": %q}}`, testConfDir)
_, err = cniTypes.NewConfLoader().LoadConf(&skel.CmdArgs{
StdinData: []byte(testConf), Args: testArgs})
Expect(err).To(HaveOccurred())
})

It("Fails if CNI stdin data does not contain pool name and forcePoolName is set", func() {
// write empty config file
err := os.WriteFile(path.Join(testConfDir, cniTypes.ConfFileName), []byte("{}"), 0o644)
Expect(err).ToNot(HaveOccurred())

// Load CNI config
testConf := fmt.Sprintf(`{"name": "my-net", "ipam": {"confDir": %q, "forcePoolName": true}}`, testConfDir)
_, err = cniTypes.NewConfLoader().LoadConf(&skel.CmdArgs{
StdinData: []byte(testConf), Args: testArgs})
Expect(err).To(HaveOccurred())
})

It("Missing metadata arguments", func() {
// write config file
confData := `{"logLevel": "debug", "logFile": "some/path.log"}`
