Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
21 changes: 15 additions & 6 deletions pkg/client/fake/client.go
Original file line number Diff line number Diff line change
Expand Up @@ -802,12 +802,21 @@ func (c *fakeClient) update(obj client.Object, isStatus bool, opts ...client.Upd
c.trackerWriteLock.Lock()
defer c.trackerWriteLock.Unlock()

// Retain managed fields
// We can ignore all errors here since update will fail if we encounter an error.
obj.SetManagedFields(nil)
current, _ := c.tracker.Get(gvr, accessor.GetNamespace(), accessor.GetName())
if currentMetaObj, ok := current.(metav1.Object); ok {
obj.SetManagedFields(currentMetaObj.GetManagedFields())
if isStatus {
// Disallow updates of managed fields directly on the (status) subresource.
//
// Note that managed fields tracking on the subresources other than status is
// broken anyway and this place is one of those that will need to change when
// proper support for subresource managed fields tracking is introduced.
//
// By re-reading the managed fields here, we at least retain the main resource's
// managed fields in case the subresource uses the main resource's body for
// the update (see fakeSubResourceClient.Update).
obj.SetManagedFields(nil)
current, _ := c.tracker.Get(gvr, accessor.GetNamespace(), accessor.GetName())
if currentMetaObj, ok := current.(metav1.Object); ok {
obj.SetManagedFields(currentMetaObj.GetManagedFields())
}
}

if err := c.tracker.update(gvr, obj, accessor.GetNamespace(), isStatus, false, *updateOptions.AsUpdateOptions()); err != nil {
Expand Down
130 changes: 123 additions & 7 deletions pkg/client/fake/client_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,7 @@ import (
const (
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

There is a compile failure in here

machineIDFromStatusUpdate = "machine-id-from-status-update"
cidrFromStatusUpdate = "cidr-from-status-update"
testOwner = "test-owner"
)

var _ = Describe("Fake client", func() {
Expand Down Expand Up @@ -1823,7 +1824,7 @@ var _ = Describe("Fake client", func() {
WithSpec(corev1applyconfigurations.NodeSpec().WithPodCIDR(initial.Spec.PodCIDR + "-updated")).
WithStatus(corev1applyconfigurations.NodeStatus().WithPhase(corev1.NodeRunning))

Expect(cl.Status().Apply(ctx, ac, client.FieldOwner("test-owner"))).To(Succeed())
Expect(cl.Status().Apply(ctx, ac, client.FieldOwner(testOwner))).To(Succeed())

actual := &corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: initial.Name}}
Expect(cl.Get(ctx, client.ObjectKeyFromObject(actual), actual)).To(Succeed())
Expand All @@ -1838,12 +1839,12 @@ var _ = Describe("Fake client", func() {
cl := NewClientBuilder().WithStatusSubresource(&corev1.Node{}).Build()
node := corev1applyconfigurations.Node("a-node").
WithSpec(corev1applyconfigurations.NodeSpec().WithPodCIDR("some-value"))
Expect(cl.Apply(ctx, node, client.FieldOwner("test-owner"))).To(Succeed())
Expect(cl.Apply(ctx, node, client.FieldOwner(testOwner))).To(Succeed())

node = node.
WithStatus(corev1applyconfigurations.NodeStatus().WithPhase(corev1.NodeRunning))

Expect(cl.Status().Apply(ctx, node, client.FieldOwner("test-owner"))).To(Succeed())
Expect(cl.Status().Apply(ctx, node, client.FieldOwner(testOwner))).To(Succeed())
})

It("should allow SSA apply on status without object has changed issues", func(ctx SpecContext) {
Expand Down Expand Up @@ -1876,7 +1877,7 @@ var _ = Describe("Fake client", func() {

resourceAC := client.ApplyConfigurationFromUnstructured(resourceForApply)

err := cl.Status().Apply(ctx, resourceAC, client.FieldOwner("test-owner"), client.ForceOwnership)
err := cl.Status().Apply(ctx, resourceAC, client.FieldOwner(testOwner), client.ForceOwnership)
Expect(err).NotTo(HaveOccurred(), "SSA apply on status should succeed when resourceVersion is not set")

// Verify the status was applied
Expand Down Expand Up @@ -1925,7 +1926,7 @@ var _ = Describe("Fake client", func() {
resourceAC := client.ApplyConfigurationFromUnstructured(resourceForApply)

// This is expected to fail with the wrong rv value passed in in the applied config
err := cl.Status().Apply(ctx, resourceAC, client.FieldOwner("test-owner"), client.ForceOwnership)
err := cl.Status().Apply(ctx, resourceAC, client.FieldOwner(testOwner), client.ForceOwnership)
Expect(err).To(HaveOccurred(), "SSA apply on status should not succeed when resourceVersion is wrongly set")
Expect(apierrors.IsConflict(err)).To(BeTrue())
})
Expand All @@ -1952,6 +1953,62 @@ var _ = Describe("Fake client", func() {
Expect(node.ManagedFields[1].Manager).To(Equal("status-owner"))
})

// this is not working properly and can't without a larger change to the codebase
PIt("should not be able to manually update the managed fields through a subresource create,update", func(ctx SpecContext) {
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

So this is a regression?

Copy link
Copy Markdown
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

No, not a regression. I think this never really worked and I'm not 100% sure about the full extent of changes that would make this work (e.g. the special handling of the scale subresource in update, ...). That's why I asked in the issue description if you even want this test in this PR.

Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This test is extremely confusing and tests two distinct things. It doesn't actually test that scale requests can not manipulate the objects managed fields, it tests that objects managed fields account correctly for scale, which is a different thing.

Please write a test instead that validates that setting managed fields on a scale request does not result in a change of the objects managed fields.

// test with update
dep := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: "dep",
Namespace: "default",
},
}
cl := NewClientBuilder().WithObjects(dep).WithReturnManagedFields().Build()

scale := &autoscalingv1.Scale{
ObjectMeta: metav1.ObjectMeta{
Name: "funnyScale",
Namespace: "default",
ManagedFields: []metav1.ManagedFieldsEntry{},
},
Spec: autoscalingv1.ScaleSpec{Replicas: 15},
}

dep.Spec.Replicas = ptr.To(int32(2))
Expect(cl.Update(ctx, dep, client.FieldOwner("depManager"))).To(Succeed())

Expect(cl.SubResource("scale").
Update(ctx, dep, client.WithSubResourceBody(scale), client.FieldOwner("scaleManager"))).
To(Succeed())
Expect(dep.ManagedFields).To(HaveLen(2))
Expect(dep.ManagedFields[0].Manager).To(Equal("depManager"))
Expect(dep.ManagedFields[1].Manager).To(Equal("scaleManager"))
Expect(dep.ManagedFields[1].Subresource).To(Equal("scale"))

// test with create
pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod",
Namespace: "default",
},
}
Expect(cl.Create(ctx, pod, client.FieldOwner("podManager"))).To(Succeed())

binding := &corev1.Binding{
ObjectMeta: metav1.ObjectMeta{
ManagedFields: []metav1.ManagedFieldsEntry{},
},
Target: corev1.ObjectReference{Name: "the-node"},
}

Expect(cl.SubResource("binding").
Create(ctx, pod, binding, client.FieldOwner("bindingManager"))).
To(Succeed())
Expect(dep.ManagedFields).To(HaveLen(2))
Expect(dep.ManagedFields[0].Manager).To(Equal("podManager"))
Expect(dep.ManagedFields[1].Manager).To(Equal("bindingManager"))
Expect(dep.ManagedFields[1].Subresource).To(Equal("binding"))
})

It("should Unmarshal the schemaless object with int64 to preserve ints", func(ctx SpecContext) {
gv := schema.GroupVersion{Group: "test", Version: "v1"}
scheme := runtime.NewScheme()
Expand Down Expand Up @@ -3121,7 +3178,7 @@ var _ = Describe("Fake client", func() {
})

It("sets the fieldManager in create, patch and update", func(ctx SpecContext) {
owner := "test-owner"
owner := testOwner
cl := client.WithFieldOwner(
NewClientBuilder().WithReturnManagedFields().Build(),
owner,
Expand Down Expand Up @@ -3155,7 +3212,7 @@ var _ = Describe("Fake client", func() {
})

It("sets the fieldManager when creating through update", func(ctx SpecContext) {
owner := "test-owner"
owner := testOwner
cl := client.WithFieldOwner(
NewClientBuilder().WithReturnManagedFields().Build(),
owner,
Expand All @@ -3168,6 +3225,65 @@ var _ = Describe("Fake client", func() {
}
})

// GH-3484
It("respects the ManagedFields during create, update, merge patch", func(ctx SpecContext) {
	cl := NewClientBuilder().WithReturnManagedFields().Build()

	// Build a minimal FieldsV1 payload covering a handful of metadata fields.
	fieldsMap := map[string]any{
		"f:metadata": map[string]any{
			"f:name":        map[string]any{},
			"f:labels":      map[string]any{},
			"f:annotations": map[string]any{},
			"f:finalizers":  map[string]any{},
		},
	}
	rawFields, marshalErr := json.Marshal(fieldsMap)
	Expect(marshalErr).NotTo(HaveOccurred())

	// Create with pre-populated managed fields; the fake client must keep them.
	cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{
		Name:      "cm-1",
		Namespace: "default",
		ManagedFields: []metav1.ManagedFieldsEntry{{
			Manager:    testOwner,
			Operation:  metav1.ManagedFieldsOperationUpdate,
			FieldsType: "FieldsV1",
			FieldsV1:   &metav1.FieldsV1{Raw: rawFields},
			APIVersion: "v1",
		}},
	}}
	Expect(cl.Create(ctx, cm)).To(Succeed())

	fetched := &corev1.ConfigMap{}
	Expect(cl.Get(ctx, client.ObjectKeyFromObject(cm), fetched)).To(Succeed())

	Expect(fetched.ManagedFields).To(HaveLen(1))
	Expect(fetched.ManagedFields[0].Manager).To(Equal(testOwner))
	Expect(fetched.ManagedFields[0].Operation).To(Equal(metav1.ManagedFieldsOperationUpdate))

	// This is similar to what e.g. csaupgrade package from client-go does.
	// Mutate the managed-fields entry through a regular update.
	cm = fetched
	cm.ManagedFields[0].Manager = "updated-manager"
	cm.ManagedFields[0].Operation = metav1.ManagedFieldsOperationApply
	Expect(cl.Update(ctx, cm)).To(Succeed())

	fetched = &corev1.ConfigMap{}
	Expect(cl.Get(ctx, client.ObjectKeyFromObject(cm), fetched)).To(Succeed())
	Expect(fetched.ManagedFields).To(HaveLen(1))
	Expect(fetched.ManagedFields[0].Manager).To(Equal("updated-manager"))
	Expect(fetched.ManagedFields[0].Operation).To(Equal(metav1.ManagedFieldsOperationApply))

	// and the same thing using a merge patch
	cm = fetched.DeepCopy()
	cm.ManagedFields[0].Manager = "updated-manager-using-patch"
	cm.ManagedFields[0].Operation = metav1.ManagedFieldsOperationUpdate
	Expect(cl.Patch(ctx, cm, client.MergeFrom(fetched))).To(Succeed())

	fetched = &corev1.ConfigMap{}
	Expect(cl.Get(ctx, client.ObjectKeyFromObject(cm), fetched)).To(Succeed())
	Expect(fetched.ManagedFields).To(HaveLen(1))
	Expect(fetched.ManagedFields[0].Manager).To(Equal("updated-manager-using-patch"))
	Expect(fetched.ManagedFields[0].Operation).To(Equal(metav1.ManagedFieldsOperationUpdate))
})

// GH-3267
It("Doesn't leave stale data when updating an object through SSA", func(ctx SpecContext) {
obj := corev1applyconfigurations.
Expand Down