Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
22 commits
Select commit Hold shift + click to select a range
de51759
Add IPv6 public backends for NodeBalancers
komer3 Mar 12, 2026
37c5a01
Document IPv6 NodeBalancer backends
komer3 Mar 12, 2026
a764399
Add Chainsaw test for IPv6 NodeBalancer backends
komer3 Mar 12, 2026
8f6ae70
Ignore local development artifacts
komer3 Mar 12, 2026
1ab3738
Honor IPv6 backend annotation over VPC backends
komer3 Mar 12, 2026
b336921
Use dual-stack CAPL flavor for IPv6 backend validation
komer3 Mar 12, 2026
9ed47f7
Make IPv6 backend e2e service dual-stack
komer3 Mar 12, 2026
de01740
Fix IPv6 backend Chainsaw reachability check
komer3 Mar 12, 2026
d874876
Document dual-stack requirement for IPv6 backends
komer3 Mar 12, 2026
efe810e
Bump CAPL management cluster toolchain
komer3 Mar 12, 2026
bf11070
Pin devbox clusterctl for v1beta2 CAPL
komer3 Mar 12, 2026
791b6a8
Enable IPv6 auto for generated VPC subnets
komer3 Mar 13, 2026
b6e521f
Fix IPv6 backend for VPCs
komer3 Mar 13, 2026
c89d74e
Fix IPv6 backend lint and docs wording
komer3 Mar 13, 2026
808822f
Potential fix for pull request finding
komer3 Mar 13, 2026
cbf9d1b
Potential fix for pull request finding
komer3 Mar 13, 2026
c8f4802
Potential fix for pull request finding
komer3 Mar 13, 2026
2cf942e
Apply suggestions from code review
komer3 Mar 13, 2026
74118be
Address IPv6 backend review comments
komer3 Mar 13, 2026
410a4da
Fix IPv6 backend test vet error
komer3 Mar 13, 2026
4735bcf
Potential fix for pull request finding
komer3 Mar 13, 2026
513ac94
Potential fix for pull request finding
komer3 Mar 13, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -39,3 +39,8 @@ coverage.txt
junit.xml

.DS_Store

# Local cluster artifacts
capl-cluster-manifests.yaml
*-kubeconfig.yaml
.opencode/
28 changes: 15 additions & 13 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,8 @@ LOCALBIN ?= $(CACHE_BIN)
DEVBOX_BIN ?= $(DEVBOX_PACKAGES_DIR)/bin
HELM ?= $(LOCALBIN)/helm
HELM_VERSION ?= v3.16.3
CLUSTERCTL ?= $(LOCALBIN)/clusterctl
CLUSTERCTL_VERSION ?= v1.12.2

GOLANGCI_LINT ?= $(LOCALBIN)/golangci-lint
GOLANGCI_LINT_NILAWAY ?= $(CACHE_BIN)/golangci-lint-nilaway
Expand All @@ -26,13 +28,13 @@ SUBNET_MANIFEST_NAME ?= subnet-testing-manifests
K8S_VERSION ?= "v1.31.2"

# renovate: datasource=github-tags depName=kubernetes-sigs/cluster-api
CAPI_VERSION ?= "v1.8.5"
CAPI_VERSION ?= "v1.12.3"

# renovate: datasource=github-tags depName=kubernetes-sigs/cluster-api-addon-provider-helm
CAAPH_VERSION ?= "v0.2.1"
CAAPH_VERSION ?= "v0.6.1"

# renovate: datasource=github-tags depName=linode/cluster-api-provider-linode
CAPL_VERSION ?= "v0.8.5"
CAPL_VERSION ?= "v0.10.1"

# renovate: datasource=github-tags depName=golangci/golangci-lint
GOLANGCI_LINT_VERSION ?= "v2.7.2"
Expand Down Expand Up @@ -166,20 +168,20 @@ mgmt-and-capl-cluster: docker-setup mgmt-cluster capl-cluster
capl-cluster: generate-capl-cluster-manifests create-capl-cluster patch-linode-ccm

.PHONY: generate-capl-cluster-manifests
generate-capl-cluster-manifests:
generate-capl-cluster-manifests: clusterctl
# Create the CAPL cluster manifests without any CSI driver stuff
LINODE_FIREWALL_ENABLED=$(LINODE_FIREWALL_ENABLED) LINODE_OS=$(LINODE_OS) VPC_NAME=$(VPC_NAME) clusterctl generate cluster $(CLUSTER_NAME) \
LINODE_FIREWALL_ENABLED=$(LINODE_FIREWALL_ENABLED) LINODE_OS=$(LINODE_OS) VPC_NAME=$(VPC_NAME) $(CLUSTERCTL) generate cluster $(CLUSTER_NAME) \
--kubernetes-version $(K8S_VERSION) --infrastructure linode-linode:$(CAPL_VERSION) \
--control-plane-machine-count $(CONTROLPLANE_NODES) --worker-machine-count $(WORKER_NODES) > $(MANIFEST_NAME).yaml
yq -i e 'select(.kind == "LinodeVPC").spec.subnets = [{"ipv4": "10.0.0.0/8", "label": "default"}, {"ipv4": "172.16.0.0/16", "label": "testing"}]' $(MANIFEST_NAME).yaml
--control-plane-machine-count $(CONTROLPLANE_NODES) --worker-machine-count $(WORKER_NODES) --flavor kubeadm-dual-stack > $(MANIFEST_NAME).yaml
yq -i e 'select(.kind == "LinodeVPC").spec.ipv6Range = [{"range": "auto"}] | select(.kind == "LinodeVPC").spec.subnets = [{"ipv4": "10.0.0.0/8", "label": "default", "ipv6Range": [{"range": "auto"}]}, {"ipv4": "172.16.0.0/16", "label": "testing", "ipv6Range": [{"range": "auto"}]}]' $(MANIFEST_NAME).yaml

.PHONY: create-capl-cluster
create-capl-cluster:
create-capl-cluster: clusterctl
# Create a CAPL cluster with updated CCM and wait for it to be ready
kubectl apply -f $(MANIFEST_NAME).yaml
kubectl wait --for=condition=ControlPlaneReady cluster/$(CLUSTER_NAME) --timeout=600s || (kubectl get cluster -o yaml; kubectl get linodecluster -o yaml; kubectl get linodemachines -o yaml; kubectl logs -n capl-system deployments/capl-controller-manager --tail=50)
kubectl wait --for=condition=NodeHealthy=true machines -l cluster.x-k8s.io/cluster-name=$(CLUSTER_NAME) --timeout=900s
clusterctl get kubeconfig $(CLUSTER_NAME) > $(KUBECONFIG_PATH)
$(CLUSTERCTL) get kubeconfig $(CLUSTER_NAME) > $(KUBECONFIG_PATH)
KUBECONFIG=$(KUBECONFIG_PATH) kubectl wait --for=condition=Ready nodes --all --timeout=600s
# Remove all taints from control plane node so that pods scheduled on it by tests can run (without this, some tests fail)
KUBECONFIG=$(KUBECONFIG_PATH) kubectl taint nodes -l node-role.kubernetes.io/control-plane node-role.kubernetes.io/control-plane-
Expand All @@ -192,10 +194,10 @@ patch-linode-ccm:
KUBECONFIG=$(KUBECONFIG_PATH) kubectl -n kube-system get daemonset/ccm-linode -o yaml

.PHONY: mgmt-cluster
mgmt-cluster:
mgmt-cluster: clusterctl
# Create a mgmt cluster
ctlptl apply -f e2e/setup/ctlptl-config.yaml
clusterctl init \
$(CLUSTERCTL) init \
--wait-providers \
--wait-provider-timeout 600 \
--core cluster-api:$(CAPI_VERSION) \
Expand Down Expand Up @@ -295,13 +297,13 @@ helm-template: helm
.PHONY: kubectl
kubectl: $(KUBECTL) ## Download kubectl locally if necessary.
$(KUBECTL): $(LOCALBIN)
curl -fsSL https://dl.k8s.io/release/$(KUBECTL_VERSION)/bin/$(OS)/$(ARCH_SHORT)/kubectl -o $(KUBECTL)
curl -fsSL https://dl.k8s.io/release/$(KUBECTL_VERSION)/bin/$(HOSTOS)/$(ARCH_SHORT)/kubectl -o $(KUBECTL)
chmod +x $(KUBECTL)

.PHONY: clusterctl
clusterctl: $(CLUSTERCTL) ## Download clusterctl locally if necessary.
$(CLUSTERCTL): $(LOCALBIN)
curl -fsSL https://github.com/kubernetes-sigs/cluster-api/releases/download/$(CLUSTERCTL_VERSION)/clusterctl-$(OS)-$(ARCH_SHORT) -o $(CLUSTERCTL)
curl -fsSL https://github.com/kubernetes-sigs/cluster-api/releases/download/$(CLUSTERCTL_VERSION)/clusterctl-$(HOSTOS)-$(ARCH_SHORT) -o $(CLUSTERCTL)
chmod +x $(CLUSTERCTL)

# .PHONY must be uppercase — GNU Make special targets are case-sensitive, so a
# lowercase ".phony" is just an ordinary target and the declaration is ignored.
.PHONY: golangci-lint-nilaway
Expand Down
2 changes: 2 additions & 0 deletions cloud/annotations/annotations.go
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,8 @@ const (
// AnnLinodeEnableIPv6Ingress is the annotation used to specify that a service should include both IPv4 and IPv6
// addresses for its LoadBalancer ingress. When set to "true", both addresses will be included in the status.
AnnLinodeEnableIPv6Ingress = "service.beta.kubernetes.io/linode-loadbalancer-enable-ipv6-ingress"
// AnnLinodeEnableIPv6Backends controls whether a NodeBalancer service should use public IPv6 backend nodes.
AnnLinodeEnableIPv6Backends = "service.beta.kubernetes.io/linode-loadbalancer-enable-ipv6-backends"

AnnLinodeNodePrivateIP = "node.k8s.linode.com/private-ip"
AnnLinodeHostUUID = "node.k8s.linode.com/host-uuid"
Expand Down
99 changes: 75 additions & 24 deletions cloud/linode/loadbalancers.go
Original file line number Diff line number Diff line change
Expand Up @@ -264,8 +264,6 @@ func (l *loadbalancers) GetLoadBalancer(ctx context.Context, clusterName string,

// EnsureLoadBalancer ensures that the cluster is running a load balancer for
// service.
//
// EnsureLoadBalancer will not modify service or nodes.
func (l *loadbalancers) EnsureLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (lbStatus *v1.LoadBalancerStatus, err error) {
ctx = sentry.SetHubOnContext(ctx)
sentry.SetTag(ctx, "cluster_name", clusterName)
Expand Down Expand Up @@ -469,9 +467,9 @@ func (l *loadbalancers) updateNodeBalancer(
}
}
oldNBNodeIDs := make(map[string]int)
var currentNBNodes []linodego.NodeBalancerNode
if currentNBCfg != nil {
// Obtain list of current NB nodes and convert it to map of node IDs
var currentNBNodes []linodego.NodeBalancerNode
currentNBNodes, err = l.client.ListNodeBalancerNodes(ctx, nb.ID, currentNBCfg.ID, nil)
if err != nil {
// This error can be ignored, because if we fail to get nodes we can anyway rebuild the config from scratch,
Expand All @@ -485,8 +483,8 @@ func (l *loadbalancers) updateNodeBalancer(
} else {
klog.Infof("No preexisting nodebalancer for port %v found.", port.Port)
}

// Add all of the Nodes to the config
newNBNodes := make([]linodego.NodeBalancerConfigRebuildNodeOptions, 0, len(nodes))
subnetID := 0
if options.Options.NodeBalancerBackendIPv4SubnetID != 0 {
subnetID = options.Options.NodeBalancerBackendIPv4SubnetID
Expand All @@ -506,21 +504,14 @@ func (l *loadbalancers) updateNodeBalancer(
}
subnetID = id
}
for _, node := range nodes {
var newNodeOpts *linodego.NodeBalancerConfigRebuildNodeOptions
newNodeOpts, err = l.buildNodeBalancerNodeConfigRebuildOptions(node, port.NodePort, subnetID, newNBCfg.Protocol)
if err != nil {
sentry.CaptureError(ctx, err)
return fmt.Errorf("failed to build NodeBalancer node config options for node %s: %w", node.Name, err)
}
oldNodeID, ok := oldNBNodeIDs[newNodeOpts.Address]
if ok {
newNodeOpts.ID = oldNodeID
} else {
klog.Infof("No preexisting node id for %v found.", newNodeOpts.Address)
}
newNBNodes = append(newNBNodes, *newNodeOpts)

useIPv6Backends := resolveIPv6NodeBalancerBackendState(service)
newNBNodes, err := l.buildNodeBalancerConfigNodes(service, nodes, port.NodePort, subnetID, useIPv6Backends, newNBCfg.Protocol, oldNBNodeIDs)
if err != nil {
sentry.CaptureError(ctx, err)
return fmt.Errorf("[port %d] error building NodeBalancer backend node configs: %w", int(port.Port), err)
}

// If there's no existing config, create it
var rebuildOpts linodego.NodeBalancerConfigRebuildOptions
if currentNBCfg == nil {
Expand Down Expand Up @@ -582,7 +573,8 @@ func (l *loadbalancers) UpdateLoadBalancer(ctx context.Context, clusterName stri
serviceWithStatus := service.DeepCopy()
serviceWithStatus.Status.LoadBalancer, err = l.getLatestServiceLoadBalancerStatus(ctx, service)
if err != nil {
return fmt.Errorf("failed to get latest LoadBalancer status for service (%s): %w", getServiceNn(service), err)
klog.Warningf("failed to get latest LoadBalancer status for service (%s), using provided status instead: %v", getServiceNn(service), err)
serviceWithStatus.Status.LoadBalancer = service.Status.LoadBalancer
}

nb, err := l.getNodeBalancerForService(ctx, serviceWithStatus)
Expand Down Expand Up @@ -1157,6 +1149,7 @@ func (l *loadbalancers) buildLoadBalancerRequest(ctx context.Context, clusterNam
}
ports := service.Spec.Ports
configs := make([]*linodego.NodeBalancerConfigCreateOptions, 0, len(ports))
useIPv6Backends := resolveIPv6NodeBalancerBackendState(service)

subnetID := 0
if options.Options.NodeBalancerBackendIPv4SubnetID != 0 {
Expand Down Expand Up @@ -1185,7 +1178,7 @@ func (l *loadbalancers) buildLoadBalancerRequest(ctx context.Context, clusterNam
createOpt := config.GetCreateOptions()

for _, node := range nodes {
newNodeOpts, err := l.buildNodeBalancerNodeConfigRebuildOptions(node, port.NodePort, subnetID, config.Protocol)
newNodeOpts, err := l.buildNodeBalancerNodeConfigRebuildOptions(service, node, port.NodePort, subnetID, useIPv6Backends, config.Protocol)
if err != nil {
sentry.CaptureError(ctx, err)
return nil, fmt.Errorf("failed to build NodeBalancer node config options for node %s: %w", node.Name, err)
Expand All @@ -1210,14 +1203,14 @@ func coerceString(str string, minLen, maxLen int, padding string) string {
return str
}

func (l *loadbalancers) buildNodeBalancerNodeConfigRebuildOptions(node *v1.Node, nodePort int32, subnetID int, protocol linodego.ConfigProtocol) (*linodego.NodeBalancerConfigRebuildNodeOptions, error) {
nodeIP, err := getNodePrivateIP(node, subnetID)
func (l *loadbalancers) buildNodeBalancerNodeConfigRebuildOptions(service *v1.Service, node *v1.Node, nodePort int32, subnetID int, useIPv6Backends bool, protocol linodego.ConfigProtocol) (*linodego.NodeBalancerConfigRebuildNodeOptions, error) {
nodeIP, err := getNodeBackendIP(service, node, subnetID, useIPv6Backends)
if err != nil {
return nil, fmt.Errorf("node %s does not have a private IP address: %w", node.Name, err)
return nil, err
}
nodeOptions := &linodego.NodeBalancerConfigRebuildNodeOptions{
NodeBalancerNodeCreateOptions: linodego.NodeBalancerNodeCreateOptions{
Address: fmt.Sprintf("%v:%v", nodeIP, nodePort),
Address: formatNodeBalancerBackendAddress(nodeIP, nodePort),
// NodeBalancer backends must be 3-32 chars in length
// If < 3 chars, pad node name with "node-" prefix
Label: coerceString(node.Name, 3, 32, "node-"),
Expand All @@ -1234,6 +1227,46 @@ func (l *loadbalancers) buildNodeBalancerNodeConfigRebuildOptions(node *v1.Node,
return nodeOptions, nil
}

// resolveIPv6NodeBalancerBackendState reports whether the NodeBalancer for the
// given service should register public IPv6 backend addresses. A per-service
// annotation (AnnLinodeEnableIPv6Backends) takes precedence when present;
// otherwise the cluster-wide EnableIPv6ForNodeBalancerBackends option applies.
func resolveIPv6NodeBalancerBackendState(service *v1.Service) bool {
	if override := getServiceBoolAnnotation(service, annotations.AnnLinodeEnableIPv6Backends); override != nil {
		// Service-level annotation wins over the global default.
		return *override
	}
	return options.Options.EnableIPv6ForNodeBalancerBackends
}

// formatNodeBalancerBackendAddress joins a backend IP and node port into the
// "host:port" form the NodeBalancer API expects. IPv6 literals are bracketed
// by net.JoinHostPort (e.g. "[2001:db8::1]:443").
func formatNodeBalancerBackendAddress(ip string, nodePort int32) string {
	port := strconv.FormatInt(int64(nodePort), 10)
	return net.JoinHostPort(ip, port)
}

// buildNodeBalancerConfigNodes assembles the rebuild options for every backend
// node of one NodeBalancer config (one service port). Backends whose address
// matches an entry in oldNBNodeIDs keep their existing NodeBalancer node ID so
// a rebuild updates them in place instead of recreating them; unmatched
// addresses are logged and created fresh. Returns an error if options cannot
// be built for any node.
func (l *loadbalancers) buildNodeBalancerConfigNodes(
	service *v1.Service,
	nodes []*v1.Node,
	nodePort int32,
	subnetID int,
	useIPv6Backends bool,
	protocol linodego.ConfigProtocol,
	oldNBNodeIDs map[string]int,
) ([]linodego.NodeBalancerConfigRebuildNodeOptions, error) {
	backendOpts := make([]linodego.NodeBalancerConfigRebuildNodeOptions, 0, len(nodes))
	for _, node := range nodes {
		opts, err := l.buildNodeBalancerNodeConfigRebuildOptions(service, node, nodePort, subnetID, useIPv6Backends, protocol)
		if err != nil {
			return nil, fmt.Errorf("failed to build NodeBalancer node config options for node %s: %w", node.Name, err)
		}
		// Reuse the previous node ID when this address already existed on the config.
		if existingID, found := oldNBNodeIDs[opts.Address]; found {
			opts.ID = existingID
		} else {
			klog.Infof("No preexisting node id for %v found.", opts.Address)
		}
		backendOpts = append(backendOpts, *opts)
	}
	return backendOpts, nil
}

func (l *loadbalancers) retrieveKubeClient() error {
if l.kubeClient != nil {
return nil
Expand Down Expand Up @@ -1379,6 +1412,24 @@ func getNodePrivateIP(node *v1.Node, subnetID int) (string, error) {
return "", fmt.Errorf("no internal IP found for node %s", node.Name)
}

// getNodeBackendIP selects the address to register for a node as a
// NodeBalancer backend. With IPv6 backends disabled it defers to
// getNodePrivateIP (which honors subnetID). With IPv6 backends enabled it
// scans the node's ExternalIP addresses for a public IPv6 address and errors
// if none is present.
func getNodeBackendIP(service *v1.Service, node *v1.Node, subnetID int, useIPv6Backends bool) (string, error) {
	if !useIPv6Backends {
		return getNodePrivateIP(node, subnetID)
	}

	// An address counts as IPv6 only if it parses and has no IPv4 form
	// (To4() == nil also rejects IPv4-mapped addresses like ::ffff:1.2.3.4).
	isIPv6 := func(raw string) bool {
		parsed := net.ParseIP(raw)
		return parsed != nil && parsed.To4() == nil
	}

	for _, address := range node.Status.Addresses {
		if address.Type == v1.NodeExternalIP && isIPv6(address.Address) {
			return address.Address, nil
		}
	}

	klog.V(4).Infof("Service %s requested IPv6 backends but node %s does not have a public IPv6 address", getServiceNn(service), node.Name)
	return "", fmt.Errorf("service %s requested IPv6 backends but node %s does not have a public IPv6 address", getServiceNn(service), node.Name)
}

func getTLSCertInfo(ctx context.Context, kubeClient kubernetes.Interface, namespace string, config portConfig) (string, string, error) {
if config.TLSSecretName == "" {
return "", "", fmt.Errorf("TLS secret name for port %v is not specified", config.Port)
Expand Down
Loading
Loading