Extend Project ResourceQuotas to Support All Upstream Kubernetes ResourceQuota Types (#52544)

* extended resource quota limits with map for arbitrary resources
updated conversion func to handle the new map
updated empty check to be explicit
note: the map[string]string makes the struct non-comparable in Go

* fixup: generated files

* fix type casting in handling of anyOther

* fix: missing handling of anyOther in back-conversion (resource list to resourceQuota)
beware: this also fixes the function's earlier mishandling of dotted resources (e.g. limits.cpu)

* feat: added basic unit tests for the conversion functions

* address comments: tweak name of new field

* address comment: simplify empty check, by means of reflection

* address comment: extended error message

* tweak name; the field is now `Extended`

* avoid updates if nothing would change

* fixup: duplicated import

* basic unit tests - setValidated, updateResourceQuota, updateDefaultLimitRange

* address comments
Author: Andreas Kupries
Date: 2025-11-13 15:02:13 +01:00
Committed by: GitHub
Parent: 7d4bbccc23
Commit: 0b96bcb64f
9 changed files with 418 additions and 31 deletions

View File

@@ -73,6 +73,13 @@ type ResourceQuotaLimit struct {
// LimitsMemory is the memory limits across all pods in a non-terminal state.
// +optional
LimitsMemory string `json:"limitsMemory,omitempty"`
// Extended contains additional limits a user may wish to impose beyond
// the limits set by the preceding fields. The keys have to be parseable
// as resource names, while the values have to be parseable as resource
// quantities. See also
// https://kubernetes.io/docs/concepts/policy/resource-quotas
Extended map[string]string `json:"extended,omitempty"`
}
// ContainerResourceLimit holds quotas limits for individual containers.
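
A minimal sketch of how the new field is meant to be consumed from Go (not part of the diff; the keys and values below are illustrative). The fixed fields keep covering the common quota types, while any other upstream ResourceQuota type goes into `Extended`:

```go
package main

import (
	apiv3 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3"
)

func main() {
	// Illustrative values only. Extended keys must parse as Kubernetes
	// resource names and the values as resource quantities.
	limit := apiv3.ResourceQuotaLimit{
		Pods:      "30",
		LimitsCPU: "2",
		Extended: map[string]string{
			"ephemeral-storage":       "10Gi",
			"count/jobs.batch":        "5",
			"requests.nvidia.com/gpu": "1",
		},
	}
	_ = limit
}
```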

View File

@@ -4519,7 +4519,7 @@ func (in *MetadataUpdate) DeepCopy() *MetadataUpdate {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamespaceResourceQuota) DeepCopyInto(out *NamespaceResourceQuota) {
*out = *in
out.Limit = in.Limit
in.Limit.DeepCopyInto(&out.Limit)
return
}
@@ -6146,8 +6146,8 @@ func (in *ProjectNetworkPolicyStatus) DeepCopy() *ProjectNetworkPolicyStatus {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProjectResourceQuota) DeepCopyInto(out *ProjectResourceQuota) {
*out = *in
out.Limit = in.Limit
out.UsedLimit = in.UsedLimit
in.Limit.DeepCopyInto(&out.Limit)
in.UsedLimit.DeepCopyInto(&out.UsedLimit)
return
}
@@ -6227,12 +6227,12 @@ func (in *ProjectSpec) DeepCopyInto(out *ProjectSpec) {
if in.ResourceQuota != nil {
in, out := &in.ResourceQuota, &out.ResourceQuota
*out = new(ProjectResourceQuota)
**out = **in
(*in).DeepCopyInto(*out)
}
if in.NamespaceDefaultResourceQuota != nil {
in, out := &in.NamespaceDefaultResourceQuota, &out.NamespaceDefaultResourceQuota
*out = new(NamespaceResourceQuota)
**out = **in
(*in).DeepCopyInto(*out)
}
if in.ContainerDefaultResourceLimit != nil {
in, out := &in.ContainerDefaultResourceLimit, &out.ContainerDefaultResourceLimit
@@ -6384,6 +6384,13 @@ func (in *RancherUserNotificationList) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceQuotaLimit) DeepCopyInto(out *ResourceQuotaLimit) {
*out = *in
if in.Extended != nil {
in, out := &in.Extended, &out.Extended
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
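
These regenerated deep-copy bodies matter because assigning a struct copies only the map header, not its contents: a shallow copy of `ResourceQuotaLimit` would share its `Extended` map with the original. A small, self-contained illustration (not the generated code itself):

```go
package main

import "fmt"

type limit struct {
	Extended map[string]string
}

func main() {
	a := limit{Extended: map[string]string{"ephemeral-storage": "10Gi"}}

	shallow := a // struct assignment copies only the map header
	shallow.Extended["ephemeral-storage"] = "20Gi"
	fmt.Println(a.Extended["ephemeral-storage"]) // "20Gi": the original was mutated too

	deep := limit{Extended: make(map[string]string, len(a.Extended))}
	for k, v := range a.Extended { // what DeepCopyInto now does key by key
		deep.Extended[k] = v
	}
	deep.Extended["ephemeral-storage"] = "30Gi"
	fmt.Println(a.Extended["ephemeral-storage"]) // still "20Gi"
}
```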

View File

@@ -3,6 +3,7 @@ package client
const (
ResourceQuotaLimitType = "resourceQuotaLimit"
ResourceQuotaLimitFieldConfigMaps = "configMaps"
ResourceQuotaLimitFieldExtended = "extended"
ResourceQuotaLimitFieldLimitsCPU = "limitsCpu"
ResourceQuotaLimitFieldLimitsMemory = "limitsMemory"
ResourceQuotaLimitFieldPersistentVolumeClaims = "persistentVolumeClaims"
@@ -18,17 +19,18 @@ const (
)
type ResourceQuotaLimit struct {
ConfigMaps string `json:"configMaps,omitempty" yaml:"configMaps,omitempty"`
LimitsCPU string `json:"limitsCpu,omitempty" yaml:"limitsCpu,omitempty"`
LimitsMemory string `json:"limitsMemory,omitempty" yaml:"limitsMemory,omitempty"`
PersistentVolumeClaims string `json:"persistentVolumeClaims,omitempty" yaml:"persistentVolumeClaims,omitempty"`
Pods string `json:"pods,omitempty" yaml:"pods,omitempty"`
ReplicationControllers string `json:"replicationControllers,omitempty" yaml:"replicationControllers,omitempty"`
RequestsCPU string `json:"requestsCpu,omitempty" yaml:"requestsCpu,omitempty"`
RequestsMemory string `json:"requestsMemory,omitempty" yaml:"requestsMemory,omitempty"`
RequestsStorage string `json:"requestsStorage,omitempty" yaml:"requestsStorage,omitempty"`
Secrets string `json:"secrets,omitempty" yaml:"secrets,omitempty"`
Services string `json:"services,omitempty" yaml:"services,omitempty"`
ServicesLoadBalancers string `json:"servicesLoadBalancers,omitempty" yaml:"servicesLoadBalancers,omitempty"`
ServicesNodePorts string `json:"servicesNodePorts,omitempty" yaml:"servicesNodePorts,omitempty"`
ConfigMaps string `json:"configMaps,omitempty" yaml:"configMaps,omitempty"`
Extended map[string]string `json:"extended,omitempty" yaml:"extended,omitempty"`
LimitsCPU string `json:"limitsCpu,omitempty" yaml:"limitsCpu,omitempty"`
LimitsMemory string `json:"limitsMemory,omitempty" yaml:"limitsMemory,omitempty"`
PersistentVolumeClaims string `json:"persistentVolumeClaims,omitempty" yaml:"persistentVolumeClaims,omitempty"`
Pods string `json:"pods,omitempty" yaml:"pods,omitempty"`
ReplicationControllers string `json:"replicationControllers,omitempty" yaml:"replicationControllers,omitempty"`
RequestsCPU string `json:"requestsCpu,omitempty" yaml:"requestsCpu,omitempty"`
RequestsMemory string `json:"requestsMemory,omitempty" yaml:"requestsMemory,omitempty"`
RequestsStorage string `json:"requestsStorage,omitempty" yaml:"requestsStorage,omitempty"`
Secrets string `json:"secrets,omitempty" yaml:"secrets,omitempty"`
Services string `json:"services,omitempty" yaml:"services,omitempty"`
ServicesLoadBalancers string `json:"servicesLoadBalancers,omitempty" yaml:"servicesLoadBalancers,omitempty"`
ServicesNodePorts string `json:"servicesNodePorts,omitempty" yaml:"servicesNodePorts,omitempty"`
}
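
The regenerated norman client mirrors the API type, so both serialize the same way. A quick sketch of that JSON shape, using the apiv3 type whose import path appears in the diff below (values are illustrative, and the expected output assumes the remaining fields keep their `omitempty` tags):

```go
package main

import (
	"encoding/json"
	"fmt"

	apiv3 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3"
)

func main() {
	l := apiv3.ResourceQuotaLimit{
		LimitsMemory: "512Mi",
		Extended: map[string]string{
			"ephemeral-storage": "10Gi",
		},
	}
	b, _ := json.Marshal(l)
	fmt.Println(string(b))
	// Expected shape:
	// {"limitsMemory":"512Mi","extended":{"ephemeral-storage":"10Gi"}}
}
```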

View File

@@ -2,6 +2,7 @@ package resourcequota
import (
"encoding/json"
"fmt"
"github.com/rancher/norman/types/convert"
apiv3 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3"
@@ -12,16 +13,27 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
)
const extendedKey = "extended"
func convertResourceListToLimit(rList corev1.ResourceList) (*apiv3.ResourceQuotaLimit, error) {
converted, err := convert.EncodeToMap(rList)
if err != nil {
return nil, err
}
convertedMap := map[string]string{}
extended := map[string]string{}
convertedMap := map[string]any{}
for key, value := range converted {
if val, ok := resourceQuotaReturnConversion[key]; ok {
key = val
} else {
extended[key] = convert.ToString(value)
}
convertedMap[key] = convert.ToString(value)
}
if len(extended) > 0 {
convertedMap[extendedKey] = extended
}
toReturn := &apiv3.ResourceQuotaLimit{}
err = convert.ToObj(convertedMap, toReturn)
@@ -46,13 +58,30 @@ func convertProjectResourceLimitToResourceList(limit *apiv3.ResourceQuotaLimit)
if err != nil {
return nil, err
}
limitsMap := map[string]string{}
limitsMap := map[string]any{}
err = json.Unmarshal(in, &limitsMap)
if err != nil {
return nil, err
}
limits := corev1.ResourceList{}
// convert the arbitrary set first, ...
if extended, ok := limitsMap[extendedKey]; ok {
delete(limitsMap, extendedKey)
for key, value := range extended.(map[string]any) {
resourceName := corev1.ResourceName(key)
resourceQuantity, err := resource.ParseQuantity(value.(string))
if err != nil {
return nil, fmt.Errorf("failed to parse value for key %q: %w", key, err)
}
limits[resourceName] = resourceQuantity
}
}
// then place the fixed data. this order ensures that in case of
// conflicts between arbitrary and fixed data the fixed data wins.
for key, value := range limitsMap {
var resourceName corev1.ResourceName
if val, ok := resourceQuotaConversion[key]; ok {
@@ -61,7 +90,7 @@ func convertProjectResourceLimitToResourceList(limit *apiv3.ResourceQuotaLimit)
resourceName = corev1.ResourceName(key)
}
resourceQuantity, err := resource.ParseQuantity(value)
resourceQuantity, err := resource.ParseQuantity(value.(string))
if err != nil {
return nil, err
}
@@ -138,6 +167,22 @@ var resourceQuotaConversion = map[string]string{
"limitsMemory": "limits.memory",
}
var resourceQuotaReturnConversion = map[string]string{
"configmaps": "configMaps",
"limits.cpu": "limitsCpu",
"limits.memory": "limitsMemory",
"persistentvolumeclaims": "persistentVolumeClaims",
"pods": "pods",
"replicationcontrollers": "replicationControllers",
"requests.cpu": "requestsCpu",
"requests.memory": "requestsMemory",
"requests.storage": "requestsStorage",
"secrets": "secrets",
"services": "services",
"services.loadbalancers": "servicesLoadBalancers",
"services.nodeports": "servicesNodePorts",
}
func getNamespaceResourceQuota(ns *corev1.Namespace) string {
if ns.Annotations == nil {
return ""
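
The ordering comment in the back-conversion above is the behavioural detail to note: extended entries are written into the ResourceList first and the fixed fields second, so on a key conflict the fixed field wins. A standalone sketch of that precedence (the helper is illustrative, not the Rancher function):

```go
package main

import "fmt"

// mergeQuota mimics the order used above: arbitrary ("extended") entries go
// in first, fixed entries second, overwriting any conflicting extended key.
func mergeQuota(fixed, extended map[string]string) map[string]string {
	out := map[string]string{}
	for k, v := range extended {
		out[k] = v
	}
	for k, v := range fixed {
		out[k] = v // fixed data wins on conflict
	}
	return out
}

func main() {
	fixed := map[string]string{"limits.cpu": "2"}
	extended := map[string]string{"limits.cpu": "8", "ephemeral-storage": "10Gi"}
	fmt.Println(mergeQuota(fixed, extended))
	// map[ephemeral-storage:10Gi limits.cpu:2] -- the fixed "2" wins
}
```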

View File

@@ -0,0 +1,152 @@
package resourcequota
import (
"testing"
apiv3 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
)
func TestConvertResourceListToLimit(t *testing.T) {
t.Run("convertResourceListToLimit", func(t *testing.T) {
out, err := convertResourceListToLimit(corev1.ResourceList{
"configmaps": resource.MustParse("1"),
"ephemeral-storage": resource.MustParse("14"),
"limits.cpu": resource.MustParse("2"),
"limits.memory": resource.MustParse("3"),
"persistentvolumeclaims": resource.MustParse("4"),
"pods": resource.MustParse("5"),
"replicationcontrollers": resource.MustParse("6"),
"requests.cpu": resource.MustParse("7"),
"requests.memory": resource.MustParse("8"),
"requests.storage": resource.MustParse("9"),
"secrets": resource.MustParse("10"),
"services": resource.MustParse("11"),
"services.loadbalancers": resource.MustParse("12"),
"services.nodeports": resource.MustParse("13"),
})
assert.NoError(t, err)
assert.Equal(t, &apiv3.ResourceQuotaLimit{
ConfigMaps: "1",
LimitsCPU: "2",
LimitsMemory: "3",
PersistentVolumeClaims: "4",
Pods: "5",
ReplicationControllers: "6",
RequestsCPU: "7",
RequestsMemory: "8",
RequestsStorage: "9",
Secrets: "10",
Services: "11",
ServicesLoadBalancers: "12",
ServicesNodePorts: "13",
Extended: map[string]string{
"ephemeral-storage": "14",
},
}, out)
})
}
func TestConvertResourceLimitResourceQuotaSpec(t *testing.T) {
t.Run("convertResourceLimitResourceQuotaSpec", func(t *testing.T) {
out, err := convertResourceLimitResourceQuotaSpec(&apiv3.ResourceQuotaLimit{
ConfigMaps: "1",
LimitsCPU: "2",
LimitsMemory: "3",
PersistentVolumeClaims: "4",
Pods: "5",
ReplicationControllers: "6",
RequestsCPU: "7",
RequestsMemory: "8",
RequestsStorage: "9",
Secrets: "10",
Services: "11",
ServicesLoadBalancers: "12",
ServicesNodePorts: "13",
Extended: map[string]string{
"ephemeral-storage": "14",
},
})
assert.NoError(t, err)
assert.Equal(t, &corev1.ResourceQuotaSpec{
Hard: corev1.ResourceList{
"configmaps": resource.MustParse("1"),
"ephemeral-storage": resource.MustParse("14"),
"limits.cpu": resource.MustParse("2"),
"limits.memory": resource.MustParse("3"),
"persistentvolumeclaims": resource.MustParse("4"),
"pods": resource.MustParse("5"),
"replicationcontrollers": resource.MustParse("6"),
"requests.cpu": resource.MustParse("7"),
"requests.memory": resource.MustParse("8"),
"requests.storage": resource.MustParse("9"),
"secrets": resource.MustParse("10"),
"services": resource.MustParse("11"),
"services.loadbalancers": resource.MustParse("12"),
"services.nodeports": resource.MustParse("13"),
},
}, out)
})
}
func TestConvertProjectResourceLimitToResourceList(t *testing.T) {
t.Run("convertProjectResourceLimitToResourceList", func(t *testing.T) {
out, err := convertProjectResourceLimitToResourceList(&apiv3.ResourceQuotaLimit{
ConfigMaps: "1",
LimitsCPU: "2",
LimitsMemory: "3",
PersistentVolumeClaims: "4",
Pods: "5",
ReplicationControllers: "6",
RequestsCPU: "7",
RequestsMemory: "8",
RequestsStorage: "9",
Secrets: "10",
Services: "11",
ServicesLoadBalancers: "12",
ServicesNodePorts: "13",
Extended: map[string]string{
"ephemeral-storage": "14",
},
})
assert.NoError(t, err)
assert.Equal(t, corev1.ResourceList{
"configmaps": resource.MustParse("1"),
"ephemeral-storage": resource.MustParse("14"),
"limits.cpu": resource.MustParse("2"),
"limits.memory": resource.MustParse("3"),
"persistentvolumeclaims": resource.MustParse("4"),
"pods": resource.MustParse("5"),
"replicationcontrollers": resource.MustParse("6"),
"requests.cpu": resource.MustParse("7"),
"requests.memory": resource.MustParse("8"),
"requests.storage": resource.MustParse("9"),
"secrets": resource.MustParse("10"),
"services": resource.MustParse("11"),
"services.loadbalancers": resource.MustParse("12"),
"services.nodeports": resource.MustParse("13"),
}, out)
})
}
func TestConvertContainerResourceLimitToResourceList(t *testing.T) {
t.Run("convertContainerResourceLimitToResourceList", func(t *testing.T) {
requests, limits, err := convertContainerResourceLimitToResourceList(&apiv3.ContainerResourceLimit{
LimitsCPU: "2",
LimitsMemory: "3",
RequestsCPU: "7",
RequestsMemory: "8",
})
assert.NoError(t, err)
assert.Equal(t, corev1.ResourceList{
"cpu": resource.MustParse("7"),
"memory": resource.MustParse("8"),
}, requests)
assert.Equal(t, corev1.ResourceList{
"cpu": resource.MustParse("2"),
"memory": resource.MustParse("3"),
}, limits)
})
}

View File

@@ -170,9 +170,12 @@ func (c *SyncController) CreateResourceQuota(ns *corev1.Namespace) (runtime.Obje
case "delete":
updatedNs := ns.DeepCopy()
delete(updatedNs.Annotations, resourceQuotaAnnotation)
updatedNs, err = c.Namespaces.Update(updatedNs)
if err != nil {
return updatedNs, err
// avoid updates if nothing would change
if !reflect.DeepEqual(updatedNs, ns) {
updatedNs, err = c.Namespaces.Update(updatedNs)
if err != nil {
return updatedNs, err
}
}
operationErr = c.deleteResourceQuota(existing)
}
@@ -192,10 +195,18 @@ func (c *SyncController) CreateResourceQuota(ns *corev1.Namespace) (runtime.Obje
}
toUpdate := updated.DeepCopy()
namespaceutil.SetNamespaceCondition(toUpdate, time.Second*1, ResourceQuotaInitCondition, true, "")
// avoid updates if nothing would change
if reflect.DeepEqual(toUpdate, updated) {
return updated, nil
}
return c.Namespaces.Update(toUpdate)
}
func (c *SyncController) updateResourceQuota(quota *corev1.ResourceQuota, spec *corev1.ResourceQuotaSpec) error {
// avoid updates if nothing would change
if reflect.DeepEqual(quota.Spec, *spec) {
return nil
}
toUpdate := quota.DeepCopy()
toUpdate.Spec = *spec
logrus.Infof("Updating default resource quota for namespace %v", toUpdate.Namespace)
@@ -204,6 +215,10 @@ func (c *SyncController) updateResourceQuota(quota *corev1.ResourceQuota, spec *
}
func (c *SyncController) updateDefaultLimitRange(limitRange *corev1.LimitRange, spec *corev1.LimitRangeSpec) error {
// avoid updates if nothing would change
if reflect.DeepEqual(limitRange.Spec, *spec) {
return nil
}
toUpdate := limitRange.DeepCopy()
toUpdate.Spec = *spec
logrus.Infof("Updating default limit range for namespace %v", toUpdate.Namespace)
@@ -338,9 +353,12 @@ func (c *SyncController) validateAndSetNamespaceQuota(ns *corev1.Namespace, quot
return false, ns, nil, err
}
updatedNs.Annotations[resourceQuotaAnnotation] = string(b)
updatedNs, err = c.Namespaces.Update(updatedNs)
if err != nil {
return false, updatedNs, nil, err
// avoid updates if nothing would change
if !reflect.DeepEqual(updatedNs, ns) {
updatedNs, err = c.Namespaces.Update(updatedNs)
if err != nil {
return false, updatedNs, nil, err
}
}
}
@@ -395,6 +413,10 @@ func (c *SyncController) setValidated(ns *corev1.Namespace, value bool, msg stri
if err := namespaceutil.SetNamespaceCondition(toUpdate, time.Second*1, ResourceQuotaValidatedCondition, value, msg); err != nil {
return ns, err
}
// avoid updates if nothing would change
if reflect.DeepEqual(toUpdate, ns) {
return ns, nil
}
return c.Namespaces.Update(toUpdate)
}
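
The guards added in several places above all follow one pattern: mutate a deep copy, compare it to the current object with reflect.DeepEqual, and skip the client call when nothing changed. A self-contained sketch of that pattern (updateIfChanged, the fake update func, and the annotation key are all illustrative, not Rancher code):

```go
package main

import (
	"fmt"
	"reflect"

	corev1 "k8s.io/api/core/v1"
)

// updateIfChanged sketches the guard: mutate a deep copy, then only call
// update when the copy actually differs from the original object.
func updateIfChanged(ns *corev1.Namespace, mutate func(*corev1.Namespace),
	update func(*corev1.Namespace) (*corev1.Namespace, error)) (*corev1.Namespace, error) {
	toUpdate := ns.DeepCopy()
	mutate(toUpdate)
	if reflect.DeepEqual(toUpdate, ns) {
		return ns, nil // avoid a no-op write to the API server
	}
	return update(toUpdate)
}

func main() {
	calls := 0
	fakeUpdate := func(ns *corev1.Namespace) (*corev1.Namespace, error) { calls++; return ns, nil }

	// The annotation key is illustrative only.
	ns := &corev1.Namespace{}
	ns.Annotations = map[string]string{"example.cattle.io/quota": "{}"}

	remove := func(n *corev1.Namespace) { delete(n.Annotations, "example.cattle.io/quota") }

	// The first removal changes the object and triggers one Update ...
	ns, _ = updateIfChanged(ns, remove, fakeUpdate)
	// ... repeating it is a no-op and never reaches the client.
	_, _ = updateIfChanged(ns, remove, fakeUpdate)

	fmt.Println(calls) // 1
}
```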

View File

@@ -1,17 +1,135 @@
package resourcequota
import (
"go.uber.org/mock/gomock"
"reflect"
"testing"
v32 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3"
"github.com/rancher/wrangler/v3/pkg/generic/fake"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/resource"
)
func TestSetValidated(t *testing.T) {
t.Run("setup changes, second identical not", func(t *testing.T) {
ctrl := gomock.NewController(t)
nsMock := fake.NewMockNonNamespacedControllerInterface[*corev1.Namespace, *corev1.NamespaceList](ctrl)
nsMock.EXPECT().Update(gomock.Any()).DoAndReturn(func(ns *corev1.Namespace) (*corev1.Namespace, error) {
return ns, nil
}).Times(1)
sc := SyncController{Namespaces: nsMock}
// setup of the condition, single call to client
ns := &corev1.Namespace{}
ns, err := sc.setValidated(ns, true, "test")
assert.NotNil(t, ns)
assert.NoError(t, err)
// second call makes no difference, does not call client
_, err = sc.setValidated(ns, true, "test")
assert.NoError(t, err)
})
}
func TestUpdateResourceQuota(t *testing.T) {
specA := corev1.ResourceQuotaSpec{
Hard: corev1.ResourceList{
"configmaps": resource.MustParse("1"),
},
}
specB := corev1.ResourceQuotaSpec{
Hard: corev1.ResourceList{
"configmaps": resource.MustParse("1"),
"ephemeral-storage": resource.MustParse("14"),
},
}
t.Run("no update if no change", func(t *testing.T) {
ctrl := gomock.NewController(t)
rqMock := fake.NewMockControllerInterface[*corev1.ResourceQuota, *corev1.ResourceQuotaList](ctrl)
rqMock.EXPECT().Update(gomock.Any()).Return(nil, nil).Times(0)
sc := SyncController{ResourceQuotas: rqMock}
err := sc.updateResourceQuota(&corev1.ResourceQuota{Spec: specA}, &specA)
assert.NoError(t, err)
})
t.Run("update for changes", func(t *testing.T) {
ctrl := gomock.NewController(t)
rqMock := fake.NewMockControllerInterface[*corev1.ResourceQuota, *corev1.ResourceQuotaList](ctrl)
rqMock.EXPECT().Update(gomock.Any()).Return(nil, nil)
sc := SyncController{ResourceQuotas: rqMock}
err := sc.updateResourceQuota(&corev1.ResourceQuota{Spec: specA}, &specB)
assert.NoError(t, err)
})
}
func TestUpdateDefaultLimitRange(t *testing.T) {
specA := corev1.LimitRangeSpec{
Limits: []corev1.LimitRangeItem{
{
Type: corev1.LimitTypePod,
Default: corev1.ResourceList{
corev1.ResourceCPU: *resource.NewQuantity(1, resource.DecimalSI),
},
DefaultRequest: corev1.ResourceList{
corev1.ResourceCPU: *resource.NewQuantity(1, resource.DecimalSI),
},
},
},
}
specB := corev1.LimitRangeSpec{
Limits: []corev1.LimitRangeItem{
{
Type: corev1.LimitTypePod,
Default: corev1.ResourceList{
corev1.ResourceCPU: *resource.NewQuantity(1, resource.DecimalSI),
},
DefaultRequest: corev1.ResourceList{
corev1.ResourceCPU: *resource.NewMilliQuantity(1000, resource.DecimalSI),
},
},
{
Type: corev1.LimitTypePod,
Default: corev1.ResourceList{
corev1.ResourceMemory: *resource.NewQuantity(1, resource.DecimalSI),
},
DefaultRequest: corev1.ResourceList{
corev1.ResourceMemory: *resource.NewQuantity(1, resource.DecimalSI),
},
},
},
}
t.Run("no update if no change", func(t *testing.T) {
ctrl := gomock.NewController(t)
lrMock := fake.NewMockControllerInterface[*corev1.LimitRange, *corev1.LimitRangeList](ctrl)
lrMock.EXPECT().Update(gomock.Any()).Return(nil, nil).Times(0)
sc := SyncController{LimitRange: lrMock}
err := sc.updateDefaultLimitRange(&corev1.LimitRange{Spec: specA}, &specA)
assert.NoError(t, err)
})
t.Run("update for changes", func(t *testing.T) {
ctrl := gomock.NewController(t)
lrMock := fake.NewMockControllerInterface[*corev1.LimitRange, *corev1.LimitRangeList](ctrl)
lrMock.EXPECT().Update(gomock.Any()).Return(nil, nil)
sc := SyncController{LimitRange: lrMock}
err := sc.updateDefaultLimitRange(&corev1.LimitRange{Spec: specA}, &specB)
assert.NoError(t, err)
})
}
func TestCompleteLimit(t *testing.T) {
type input struct {
nsValues *v32.ContainerResourceLimit

View File

@@ -2,6 +2,7 @@ package resourcequota
import (
"fmt"
"reflect"
apiv3 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3"
wmgmtv3 "github.com/rancher/rancher/pkg/generated/controllers/management.cattle.io/v3"
@@ -35,15 +36,14 @@ func (r *reconcileController) reconcileNamespaces(_ string, p *apiv3.Project) (r
// With no namespaces used-limit has to be empty because there is
// nothing which can be used without namespaces. Therefore squash
// non-empty used-limits, if present.
empty := apiv3.ResourceQuotaLimit{}
if len(namespaces) == 0 &&
p.Spec.ResourceQuota != nil &&
p.Spec.ResourceQuota.UsedLimit != empty {
!isEmpty(&p.Spec.ResourceQuota.UsedLimit) {
logrus.Warnf("project %q, clearing bogus used-limit", p.Name)
newP := p.DeepCopy()
newP.Spec.ResourceQuota.UsedLimit = empty
newP.Spec.ResourceQuota.UsedLimit = apiv3.ResourceQuotaLimit{}
_, err := r.projects.Update(newP)
if err != nil {
logrus.Errorf("project %q, clearing bogus used-limit failed: %q", p.Name, err)
@@ -57,3 +57,7 @@ func (r *reconcileController) reconcileNamespaces(_ string, p *apiv3.Project) (r
}
return nil, nil
}
func isEmpty(rql *apiv3.ResourceQuotaLimit) bool {
return reflect.ValueOf(rql).IsZero()
}
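
Because maps are not comparable, adding `Extended` means `ResourceQuotaLimit` can no longer be checked against an empty struct with `==` or `!=`, which is why the emptiness test moves to reflection. A minimal illustration; note that reflect's IsZero reports the zero value of whatever it is handed, so on a struct value it means "all fields are zero", while on a pointer it only means "the pointer is nil":

```go
package main

import (
	"fmt"
	"reflect"
)

type quotaLimit struct {
	Pods     string
	Extended map[string]string // this field makes the struct non-comparable
}

func main() {
	// q == quotaLimit{} no longer compiles once a map field is present.
	var q quotaLimit
	fmt.Println(reflect.ValueOf(q).IsZero()) // true: every field is its zero value

	q.Extended = map[string]string{"ephemeral-storage": "10Gi"}
	fmt.Println(reflect.ValueOf(q).IsZero()) // false

	// On a pointer, IsZero only checks whether the pointer itself is nil.
	fmt.Println(reflect.ValueOf(&q).IsZero()) // false, regardless of q's contents
}
```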

View File

@@ -96,6 +96,16 @@ spec:
description: ConfigMaps is the total number of ReplicationControllers
that can exist in the namespace.
type: string
extended:
additionalProperties:
type: string
description: |-
Extended contains additional limits a user may wish to impose beyond
the limits set by the preceding fields. The keys have to be parseable
as resource names, while the values have to be parseable as resource
quantities. See also
https://kubernetes.io/docs/concepts/policy/resource-quotas
type: object
limitsCpu:
description: LimitsCPU is the CPU limits across all pods in
a non-terminal state.
@@ -161,6 +171,16 @@ spec:
description: ConfigMaps is the total number of ReplicationControllers
that can exist in the namespace.
type: string
extended:
additionalProperties:
type: string
description: |-
Extended contains additional limits a user may wish to impose beyond
the limits set by the preceding fields. The keys have to be parseable
as resource names, while the values have to be parseable as resource
quantities. See also
https://kubernetes.io/docs/concepts/policy/resource-quotas
type: object
limitsCpu:
description: LimitsCPU is the CPU limits across all pods in
a non-terminal state.
@@ -219,6 +239,16 @@ spec:
description: ConfigMaps is the total number of ReplicationControllers
that can exist in the namespace.
type: string
extended:
additionalProperties:
type: string
description: |-
Extended contains additional limits a user may wish to impose beyond
the limits set by the preceding fields. The keys have to be parseable
as resource names, while the values have to be parseable as resource
quantities. See also
https://kubernetes.io/docs/concepts/policy/resource-quotas
type: object
limitsCpu:
description: LimitsCPU is the CPU limits across all pods in
a non-terminal state.