Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
240 changes: 238 additions & 2 deletions internal/controller/evpn/fabric_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -9,17 +9,22 @@ import (

"k8s.io/apimachinery/pkg/api/equality"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
kerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/client-go/tools/events"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/predicate"

"github.com/ironcore-dev/network-operator/api/core/v1alpha1"
evpnv1alpha1 "github.com/ironcore-dev/network-operator/api/evpn/v1alpha1"
poolv1alpha1 "github.com/ironcore-dev/network-operator/api/pool/v1alpha1"
"github.com/ironcore-dev/network-operator/internal/conditions"
"github.com/ironcore-dev/network-operator/internal/provider"
)
Expand All @@ -36,13 +41,17 @@ type FabricReconciler struct {
// More info: https://book.kubebuilder.io/reference/raising-events
Recorder events.EventRecorder

// Provider is the driver that will be used to create & delete the interface.
// Provider is the driver that will be used to create interfaces.
Provider provider.ProviderFunc
}

// +kubebuilder:rbac:groups=evpn.networking.metal.ironcore.dev,resources=fabrics,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=evpn.networking.metal.ironcore.dev,resources=fabrics/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=evpn.networking.metal.ironcore.dev,resources=fabrics/finalizers,verbs=update
// +kubebuilder:rbac:groups=networking.metal.ironcore.dev,resources=devices,verbs=get;list;watch
// +kubebuilder:rbac:groups=networking.metal.ironcore.dev,resources=interfaces,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=pool.networking.metal.ironcore.dev,resources=claims,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=pool.networking.metal.ironcore.dev,resources=ipaddresspools,verbs=get;list;watch
// +kubebuilder:rbac:groups=events.k8s.io,resources=events,verbs=create;patch

// Reconcile is part of the main kubernetes reconciliation loop which aims to
Expand All @@ -64,6 +73,18 @@ func (r *FabricReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ c
return ctrl.Result{}, err
}

if _, ok := r.Provider().(provider.InterfaceProvider); !ok {
if meta.SetStatusCondition(&fabric.Status.Conditions, metav1.Condition{
Type: v1alpha1.ReadyCondition,
Status: metav1.ConditionFalse,
Reason: v1alpha1.NotImplementedReason,
Message: "Provider does not implement provider.InterfaceProvider",
}) {
return ctrl.Result{}, r.Status().Update(ctx, fabric)
}
return ctrl.Result{}, nil
}

if !fabric.DeletionTimestamp.IsZero() {
if controllerutil.ContainsFinalizer(fabric, evpnv1alpha1.FinalizerName) {
if err := r.finalize(ctx, fabric); err != nil {
Expand Down Expand Up @@ -136,6 +157,15 @@ func (r *FabricReconciler) SetupWithManager(mgr ctrl.Manager) error {

return ctrl.NewControllerManagedBy(mgr).
For(&evpnv1alpha1.Fabric{}).
Owns(&poolv1alpha1.Claim{}).
Owns(&v1alpha1.Interface{}).
// Re-reconcile when a Device's labels change so that devices newly
// matching a deviceSelector are enrolled into the fabric.
Watches(
&v1alpha1.Device{},
handler.EnqueueRequestsFromMapFunc(r.devicesToFabrics),
builder.WithPredicates(predicate.LabelChangedPredicate{}),
).
WithEventFilter(filter).
Named("evpn-fabric").
Complete(r)
Expand All @@ -147,7 +177,9 @@ type ReconcileFunc func(context.Context, *evpnv1alpha1.Fabric) (ctrl.Result, err

func (r *FabricReconciler) reconcile(ctx context.Context, fabric *evpnv1alpha1.Fabric) (ctrl.Result, error) {
phases := []ReconcileFunc{
// r.reconcileNodes,
r.reconcileSystemLoopbacks,
r.reconcileVTEPLoopbacks,
r.reconcileAnycastRPLoopbacks,
}
for _, phase := range phases {
res, err := phase(ctx, fabric)
Expand All @@ -169,3 +201,207 @@ func (r *FabricReconciler) finalize(ctx context.Context, fabric *evpnv1alpha1.Fa
_ = fabric
return nil
}

// Well-known loopback interface IDs used by the fabric controller. The numeric
// ID is handed to the provider's LoopbackInterfaceName to derive the
// device-local interface name, and selects the Interface description below.
const (
	LoopbackRouterID    = 0   // Router-ID and BGP source address, present on all fabric devices
	LoopbackVTEP        = 1   // Primary VTEP address, present on VTEP devices
	LoopbackVTEPAnycast = 2   // Anycast VTEP address, present on VTEP devices (deprecated in favour of ESI)
	LoopbackAnycastRP   = 100 // PIM anycast rendezvous point address, shared across RP devices
)

// reconcileSystemLoopbacks ensures lo0 (Router-ID / BGP source) exists on every
// device matched by the fabric's spec.deviceSelector. Each device gets its own
// address claim named "<fabric>-<device>-lo0".
func (r *FabricReconciler) reconcileSystemLoopbacks(ctx context.Context, fabric *evpnv1alpha1.Fabric) (ctrl.Result, error) {
	sel, err := metav1.LabelSelectorAsSelector(&fabric.Spec.DeviceSelector)
	if err != nil {
		return ctrl.Result{}, fmt.Errorf("invalid deviceSelector: %w", err)
	}

	var deviceList v1alpha1.DeviceList
	if err := r.List(ctx, &deviceList, client.InNamespace(fabric.Namespace), client.MatchingLabelsSelector{Selector: sel}); err != nil {
		return ctrl.Result{}, fmt.Errorf("listing fabric devices: %w", err)
	}

	for idx := range deviceList.Items {
		device := &deviceList.Items[idx]
		// One claim per device for the lo0 address.
		claim, err := r.reconcileLoopbackClaim(ctx, fabric, fmt.Sprintf("%s-%s-lo%d", fabric.Name, device.Name, LoopbackRouterID))
		if err != nil {
			return ctrl.Result{}, err
		}
		if err := r.reconcileLoopbackInterface(ctx, fabric, device, LoopbackRouterID, claim); err != nil {
			return ctrl.Result{}, err
		}
	}
	return ctrl.Result{}, nil
}

// reconcileVTEPLoopbacks ensures lo1 (primary VTEP) and lo2 (anycast VTEP)
// exist on every device matched by spec.vtep.deviceSelector. Each device gets a
// dedicated claim per loopback, named "<fabric>-<device>-lo<ID>".
func (r *FabricReconciler) reconcileVTEPLoopbacks(ctx context.Context, fabric *evpnv1alpha1.Fabric) (ctrl.Result, error) {
	sel, err := metav1.LabelSelectorAsSelector(&fabric.Spec.VTEP.DeviceSelector)
	if err != nil {
		return ctrl.Result{}, fmt.Errorf("invalid vtep deviceSelector: %w", err)
	}

	var deviceList v1alpha1.DeviceList
	if err := r.List(ctx, &deviceList, client.InNamespace(fabric.Namespace), client.MatchingLabelsSelector{Selector: sel}); err != nil {
		return ctrl.Result{}, fmt.Errorf("listing VTEP devices: %w", err)
	}

	loopbackIDs := [...]int{LoopbackVTEP, LoopbackVTEPAnycast}
	for idx := range deviceList.Items {
		device := &deviceList.Items[idx]
		for _, id := range loopbackIDs {
			claim, err := r.reconcileLoopbackClaim(ctx, fabric, fmt.Sprintf("%s-%s-lo%d", fabric.Name, device.Name, id))
			if err != nil {
				return ctrl.Result{}, err
			}
			if err := r.reconcileLoopbackInterface(ctx, fabric, device, id, claim); err != nil {
				return ctrl.Result{}, err
			}
		}
	}
	return ctrl.Result{}, nil
}

// reconcileAnycastRPLoopbacks ensures lo100 (PIM anycast RP) exists on RP
// devices. One claim is allocated per AnycastRendezvousPoint group, named
// "<fabric>-<group>-lo100"; every RP device in the group is configured with
// that single shared address.
func (r *FabricReconciler) reconcileAnycastRPLoopbacks(ctx context.Context, fabric *evpnv1alpha1.Fabric) (ctrl.Result, error) {
	pim := fabric.Spec.BUM.PIM
	if pim == nil {
		// PIM is not configured for this fabric; nothing to reconcile.
		return ctrl.Result{}, nil
	}

	for _, rp := range pim.AnycastRendezvousPoints {
		// A single group-scoped claim: all members share the anycast address.
		claim, err := r.reconcileLoopbackClaim(ctx, fabric, fmt.Sprintf("%s-%s-lo%d", fabric.Name, rp.Name, LoopbackAnycastRP))
		if err != nil {
			return ctrl.Result{}, err
		}

		sel, err := metav1.LabelSelectorAsSelector(&rp.DeviceSelector)
		if err != nil {
			return ctrl.Result{}, fmt.Errorf("invalid RP deviceSelector %q: %w", rp.Name, err)
		}
		var deviceList v1alpha1.DeviceList
		if err := r.List(ctx, &deviceList, client.InNamespace(fabric.Namespace), client.MatchingLabelsSelector{Selector: sel}); err != nil {
			return ctrl.Result{}, fmt.Errorf("listing RP devices for %q: %w", rp.Name, err)
		}

		for idx := range deviceList.Items {
			if err := r.reconcileLoopbackInterface(ctx, fabric, &deviceList.Items[idx], LoopbackAnycastRP, claim); err != nil {
				return ctrl.Result{}, err
			}
		}
	}
	return ctrl.Result{}, nil
}

// reconcileLoopbackClaim ensures a Claim with the given name exists in the
// fabric's namespace and points at the fabric's configured IPAddressPool,
// emitting an event when the claim is first created. The returned Claim can be
// passed directly to reconcileLoopbackInterface.
func (r *FabricReconciler) reconcileLoopbackClaim(ctx context.Context, fabric *evpnv1alpha1.Fabric, claimName string) (*poolv1alpha1.Claim, error) {
	claim := &poolv1alpha1.Claim{
		ObjectMeta: metav1.ObjectMeta{Name: claimName, Namespace: fabric.Namespace},
	}
	mutate := func() error {
		claim.Spec = poolv1alpha1.ClaimSpec{
			PoolRef: v1alpha1.TypedLocalObjectReference{
				APIVersion: poolv1alpha1.GroupVersion.String(),
				Kind:       "IPAddressPool",
				Name:       fabric.Spec.Loopbacks.IPAddressPoolRef.Name,
			},
		}
		// The fabric controls the claim so deletion cascades and the Owns()
		// watch re-enqueues the fabric on claim changes.
		return controllerutil.SetControllerReference(fabric, claim, r.Scheme)
	}

	op, err := controllerutil.CreateOrPatch(ctx, r.Client, claim, mutate)
	if err != nil {
		return nil, fmt.Errorf("reconciling claim %s: %w", claimName, err)
	}
	if op == controllerutil.OperationResultCreated {
		r.Recorder.Eventf(fabric, nil, "Normal", "ClaimCreated", "Reconcile", "Created loopback address claim %s", claimName)
	}
	return claim, nil
}

// reconcileLoopbackInterface creates or updates the Interface for a given
// device loopback once its Claim is allocated.
//
// It is a no-op while the claim is not yet allocated; the Owns() watch on Claim
// will re-enqueue this Fabric when the pool controller updates the claim
// status. The fabric is added as a (non-controlling) owner of the Interface.
func (r *FabricReconciler) reconcileLoopbackInterface(ctx context.Context, fabric *evpnv1alpha1.Fabric, device *v1alpha1.Device, loopbackID int, claim *poolv1alpha1.Claim) error {
	cond := conditions.Get(claim, poolv1alpha1.AllocatedCondition)
	if cond == nil || cond.Status != metav1.ConditionTrue || claim.Status.Value == "" {
		// Address not allocated yet; the claim watch will bring us back.
		return nil
	}

	// NOTE(review): the hard-coded /32 host mask assumes the pool allocates
	// IPv4 addresses — confirm before IPv6 pools are supported.
	prefix, err := v1alpha1.ParsePrefix(claim.Status.Value + "/32")
	if err != nil {
		return fmt.Errorf("parsing allocated address %q: %w", claim.Status.Value, err)
	}

	// Reconcile verifies the provider capability before dispatching the
	// phases, but use a comma-ok assertion here so any future caller outside
	// that guard returns an error instead of panicking the manager.
	ifProvider, ok := r.Provider().(provider.InterfaceProvider)
	if !ok {
		return fmt.Errorf("provider does not implement provider.InterfaceProvider")
	}
	handle, err := ifProvider.LoopbackInterfaceName(loopbackID)
	if err != nil {
		return fmt.Errorf("resolving loopback interface name for id %d: %w", loopbackID, err)
	}

	name := fmt.Sprintf("%s-%s-%s", fabric.Name, device.Name, handle)
	intf := &v1alpha1.Interface{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: fabric.Namespace,
		},
	}
	res, err := controllerutil.CreateOrPatch(ctx, r.Client, intf, func() error {
		intf.Spec.DeviceRef = v1alpha1.LocalObjectReference{Name: device.Name}
		intf.Spec.Name = handle
		intf.Spec.Type = v1alpha1.InterfaceTypeLoopback
		intf.Spec.AdminState = v1alpha1.AdminStateUp
		// Description reflects the well-known role of the loopback ID.
		switch loopbackID {
		case LoopbackRouterID:
			intf.Spec.Description = "Router-ID, BGP Source"
		case LoopbackVTEP:
			intf.Spec.Description = "Primary VTEP"
		case LoopbackVTEPAnycast:
			intf.Spec.Description = "VTEP Anycast"
		case LoopbackAnycastRP:
			intf.Spec.Description = "Rendezvous Point"
		}
		if intf.Spec.IPv4 == nil {
			intf.Spec.IPv4 = &v1alpha1.InterfaceIPv4{}
		}
		// Only rewrite the address list when it drifts from the allocation.
		if len(intf.Spec.IPv4.Addresses) == 0 || intf.Spec.IPv4.Addresses[0] != prefix {
			intf.Spec.IPv4.Addresses = []v1alpha1.IPPrefix{prefix}
		}
		return controllerutil.SetOwnerReference(fabric, intf, r.Scheme)
	})
	if err != nil {
		return fmt.Errorf("reconciling interface %s: %w", name, err)
	}
	if res == controllerutil.OperationResultCreated {
		r.Recorder.Eventf(fabric, nil, "Normal", "InterfaceCreated", "Reconcile", "Created loopback interface %s", name)
	}
	return nil
}

// devicesToFabrics is a [handler.MapFunc] that enqueues every Fabric whose
// device-selecting fields match the labels of the changed Device.
//
// Besides spec.deviceSelector it also considers spec.vtep.deviceSelector and
// each anycast rendezvous-point deviceSelector: the reconcile phases enroll
// devices via all three, so a relabelled device that newly matches (or stops
// matching) any of them must trigger a reconciliation as well.
func (r *FabricReconciler) devicesToFabrics(ctx context.Context, obj client.Object) []ctrl.Request {
	device, ok := obj.(*v1alpha1.Device)
	if !ok {
		panic(fmt.Sprintf("Expected a Device but got a %T", obj))
	}

	log := ctrl.LoggerFrom(ctx)

	fabricList := &evpnv1alpha1.FabricList{}
	if err := r.List(ctx, fabricList, client.InNamespace(device.Namespace)); err != nil {
		log.Error(err, "Failed to list Fabrics")
		return nil
	}

	deviceLabels := labels.Set(device.Labels)
	var requests []ctrl.Request
	for i := range fabricList.Items {
		fabric := &fabricList.Items[i]

		// Collect every selector that can pull a device into this fabric.
		selectors := []*metav1.LabelSelector{
			&fabric.Spec.DeviceSelector,
			&fabric.Spec.VTEP.DeviceSelector,
		}
		if fabric.Spec.BUM.PIM != nil {
			rps := fabric.Spec.BUM.PIM.AnycastRendezvousPoints
			for j := range rps {
				selectors = append(selectors, &rps[j].DeviceSelector)
			}
		}

		for _, ls := range selectors {
			selector, err := metav1.LabelSelectorAsSelector(ls)
			if err != nil {
				log.Error(err, "Failed to parse deviceSelector", "fabric", fabric.Name)
				continue
			}
			if selector.Matches(deviceLabels) {
				log.V(2).Info("Enqueuing Fabric for reconciliation", "fabric", fabric.Name)
				requests = append(requests, ctrl.Request{
					NamespacedName: client.ObjectKeyFromObject(fabric),
				})
				break // one request per fabric is enough
			}
		}
	}
	return requests
}
Loading