chore(k8sclusterreceiver): 🔥 remove deprecated kubernetes API resources #26516

Merged
Commits (17)
c74fc14
feat(k8sclusterreceiver): ✨ introduce ignore_deprecated_resource config
prashant-shahi Sep 6, 2023
0f7bd1f
docs(k8scluster): 📝 update README and example config
prashant-shahi Sep 7, 2023
6c2e5cb
Merge branch 'main' into chore/flag-autoscaling-v2beta2
prashant-shahi Sep 7, 2023
1389666
chore: 🚚 rename ignore_deprecated_resource to ignore_deprecated_resou…
prashant-shahi Sep 8, 2023
e1d1ae6
docs(k8scluster): 📝 update changelog
prashant-shahi Sep 8, 2023
1528480
Merge branch 'main' into chore/flag-autoscaling-v2beta2
prashant-shahi Sep 8, 2023
c1e5f70
Merge branch 'main' into chore/flag-autoscaling-v2beta2
prashant-shahi Sep 8, 2023
d421469
refactor(k8sclusterreceiver): :recycle: remove deprecated APIs and re…
prashant-shahi Sep 9, 2023
00ebf82
Merge branch 'main' into chore/flag-autoscaling-v2beta2
prashant-shahi Sep 9, 2023
97e1d40
docs(k8sclusterreceiver): 📝 update readme docs
prashant-shahi Sep 9, 2023
a6622b2
docs(k8scluster): 📝 update changelog
prashant-shahi Sep 9, 2023
3eca59a
Merge branch 'main' into chore/flag-autoscaling-v2beta2
prashant-shahi Sep 11, 2023
ee9e5d7
docs(k8scluster): 📝 update changelog generator config instead
prashant-shahi Sep 11, 2023
7a9bea3
docs(k8scluster): 📝 update changelog
prashant-shahi Sep 11, 2023
fb9a091
chore: update .chloggen/k8sclusterreceiver-remove-deprecated-resource…
prashant-shahi Sep 11, 2023
b8e0fbb
chore: update .chloggen/k8sclusterreceiver-remove-deprecated-resource…
prashant-shahi Sep 11, 2023
8ace748
Merge branch 'main' into chore/flag-autoscaling-v2beta2
prashant-shahi Sep 11, 2023
27 changes: 27 additions & 0 deletions .chloggen/k8sclusterreceiver-remove-deprecated-resources.yaml
@@ -0,0 +1,27 @@
# Use this changelog template to create an entry for release notes.

# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
change_type: "deprecation"

# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
component: k8sclusterreceiver

# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
note: "Remove deprecated Kubernetes API resources"

# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
issues: [23612,26551]

# (Optional) One or more lines of additional information to render under the primary note.
# These lines will be padded with 2 spaces and then inserted directly into the document.
# Use pipe (|) for multiline entries.
subtext:

# If your change doesn't affect end users or the exported elements of any package,
# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
# Optional: The change log or logs in which this entry should be included.
# e.g. '[user]' or '[user, api]'
# Include 'user' if the change is relevant to end users.
# Include 'api' if there is a change to a library API.
# Default: '[user]'
change_logs: [user]
@@ -12,9 +12,7 @@ import (
"go.opentelemetry.io/collector/receiver"
appsv1 "k8s.io/api/apps/v1"
autoscalingv2 "k8s.io/api/autoscaling/v2"
autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
batchv1 "k8s.io/api/batch/v1"
batchv1beta1 "k8s.io/api/batch/v1beta1"
corev1 "k8s.io/api/core/v1"

"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver/internal/clusterresourcequota"
@@ -99,15 +97,9 @@ func (dc *DataCollector) CollectMetricData(currentTime time.Time) pmetric.Metric
dc.metadataStore.ForEach(gvk.CronJob, func(o any) {
cronjob.RecordMetrics(dc.metricsBuilder, o.(*batchv1.CronJob), ts)
})
dc.metadataStore.ForEach(gvk.CronJobBeta, func(o any) {
cronjob.RecordMetricsBeta(dc.metricsBuilder, o.(*batchv1beta1.CronJob), ts)
})
dc.metadataStore.ForEach(gvk.HorizontalPodAutoscaler, func(o any) {
hpa.RecordMetrics(dc.metricsBuilder, o.(*autoscalingv2.HorizontalPodAutoscaler), ts)
})
dc.metadataStore.ForEach(gvk.HorizontalPodAutoscalerBeta, func(o any) {
hpa.RecordMetricsBeta(dc.metricsBuilder, o.(*autoscalingv2beta2.HorizontalPodAutoscaler), ts)
})
dc.metadataStore.ForEach(gvk.ClusterResourceQuota, func(o any) {
clusterresourcequota.RecordMetrics(dc.metricsBuilder, o.(*quotav1.ClusterResourceQuota), ts)
})
18 changes: 0 additions & 18 deletions receiver/k8sclusterreceiver/internal/cronjob/cronjobs.go
@@ -6,7 +6,6 @@ package cronjob // import "github.com/open-telemetry/opentelemetry-collector-con
import (
"go.opentelemetry.io/collector/pdata/pcommon"
batchv1 "k8s.io/api/batch/v1"
batchv1beta1 "k8s.io/api/batch/v1beta1"

"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver/internal/constants"
@@ -30,26 +29,9 @@ func RecordMetrics(mb *metadata.MetricsBuilder, cj *batchv1.CronJob, ts pcommon.
mb.EmitForResource(metadata.WithResource(rb.Emit()))
}

func RecordMetricsBeta(mb *metadata.MetricsBuilder, cj *batchv1beta1.CronJob, ts pcommon.Timestamp) {
mb.RecordK8sCronjobActiveJobsDataPoint(ts, int64(len(cj.Status.Active)))
rb := mb.NewResourceBuilder()
rb.SetK8sNamespaceName(cj.Namespace)
rb.SetK8sCronjobUID(string(cj.UID))
rb.SetK8sCronjobName(cj.Name)
rb.SetOpencensusResourcetype("k8s")
mb.EmitForResource(metadata.WithResource(rb.Emit()))
}

func GetMetadata(cj *batchv1.CronJob) map[experimentalmetricmetadata.ResourceID]*metadata.KubernetesMetadata {
rm := metadata.GetGenericMetadata(&cj.ObjectMeta, constants.K8sKindCronJob)
rm.Metadata[cronJobKeySchedule] = cj.Spec.Schedule
rm.Metadata[cronJobKeyConcurrencyPolicy] = string(cj.Spec.ConcurrencyPolicy)
return map[experimentalmetricmetadata.ResourceID]*metadata.KubernetesMetadata{experimentalmetricmetadata.ResourceID(cj.UID): rm}
}

func GetMetadataBeta(cj *batchv1beta1.CronJob) map[experimentalmetricmetadata.ResourceID]*metadata.KubernetesMetadata {
rm := metadata.GetGenericMetadata(&cj.ObjectMeta, constants.K8sKindCronJob)
rm.Metadata[cronJobKeySchedule] = cj.Spec.Schedule
rm.Metadata[cronJobKeyConcurrencyPolicy] = string(cj.Spec.ConcurrencyPolicy)
return map[experimentalmetricmetadata.ResourceID]*metadata.KubernetesMetadata{experimentalmetricmetadata.ResourceID(cj.UID): rm}
}
30 changes: 14 additions & 16 deletions receiver/k8sclusterreceiver/internal/gvk/gvk.go
@@ -7,20 +7,18 @@ import "k8s.io/apimachinery/pkg/runtime/schema"

// Kubernetes group version kinds
var (
Pod = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"}
Node = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Node"}
Namespace = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Namespace"}
ReplicationController = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ReplicationController"}
ResourceQuota = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ResourceQuota"}
Service = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Service"}
DaemonSet = schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "DaemonSet"}
Deployment = schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"}
ReplicaSet = schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "ReplicaSet"}
StatefulSet = schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "StatefulSet"}
Job = schema.GroupVersionKind{Group: "batch", Version: "v1", Kind: "Job"}
CronJob = schema.GroupVersionKind{Group: "batch", Version: "v1", Kind: "CronJob"}
CronJobBeta = schema.GroupVersionKind{Group: "batch", Version: "v1beta1", Kind: "CronJob"}
HorizontalPodAutoscaler = schema.GroupVersionKind{Group: "autoscaling", Version: "v2", Kind: "HorizontalPodAutoscaler"}
HorizontalPodAutoscalerBeta = schema.GroupVersionKind{Group: "autoscaling", Version: "v2beta2", Kind: "HorizontalPodAutoscaler"}
ClusterResourceQuota = schema.GroupVersionKind{Group: "quota", Version: "v1", Kind: "ClusterResourceQuota"}
Pod = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"}
Node = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Node"}
Namespace = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Namespace"}
ReplicationController = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ReplicationController"}
ResourceQuota = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ResourceQuota"}
Service = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Service"}
DaemonSet = schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "DaemonSet"}
Deployment = schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"}
ReplicaSet = schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "ReplicaSet"}
StatefulSet = schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "StatefulSet"}
Job = schema.GroupVersionKind{Group: "batch", Version: "v1", Kind: "Job"}
CronJob = schema.GroupVersionKind{Group: "batch", Version: "v1", Kind: "CronJob"}
HorizontalPodAutoscaler = schema.GroupVersionKind{Group: "autoscaling", Version: "v2", Kind: "HorizontalPodAutoscaler"}
ClusterResourceQuota = schema.GroupVersionKind{Group: "quota", Version: "v1", Kind: "ClusterResourceQuota"}
)
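For context, here is a minimal standalone sketch (not code from this PR) of how the remaining stable GroupVersionKinds are expressed with apimachinery's schema package. The stableKinds slice below is a hypothetical example for illustration, not the receiver's actual gvk table.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
)

// stableKinds is a hypothetical list mirroring the stable GVKs this PR keeps.
var stableKinds = []schema.GroupVersionKind{
	{Group: "batch", Version: "v1", Kind: "CronJob"},
	{Group: "autoscaling", Version: "v2", Kind: "HorizontalPodAutoscaler"},
}

func main() {
	// GroupVersionKind implements String(), e.g. "batch/v1, Kind=CronJob".
	for _, gvk := range stableKinds {
		fmt.Println(gvk.String())
	}
}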
19 changes: 0 additions & 19 deletions receiver/k8sclusterreceiver/internal/hpa/hpa.go
@@ -6,24 +6,11 @@ package hpa // import "github.com/open-telemetry/opentelemetry-collector-contrib
import (
"go.opentelemetry.io/collector/pdata/pcommon"
autoscalingv2 "k8s.io/api/autoscaling/v2"
autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"

"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver/internal/metadata"
)

func RecordMetricsBeta(mb *metadata.MetricsBuilder, hpa *autoscalingv2beta2.HorizontalPodAutoscaler, ts pcommon.Timestamp) {
mb.RecordK8sHpaMaxReplicasDataPoint(ts, int64(hpa.Spec.MaxReplicas))
mb.RecordK8sHpaMinReplicasDataPoint(ts, int64(*hpa.Spec.MinReplicas))
mb.RecordK8sHpaCurrentReplicasDataPoint(ts, int64(hpa.Status.CurrentReplicas))
mb.RecordK8sHpaDesiredReplicasDataPoint(ts, int64(hpa.Status.DesiredReplicas))
rb := mb.NewResourceBuilder()
rb.SetK8sHpaUID(string(hpa.UID))
rb.SetK8sHpaName(hpa.Name)
rb.SetK8sNamespaceName(hpa.Namespace)
mb.EmitForResource(metadata.WithResource(rb.Emit()))
}

func RecordMetrics(mb *metadata.MetricsBuilder, hpa *autoscalingv2.HorizontalPodAutoscaler, ts pcommon.Timestamp) {
mb.RecordK8sHpaMaxReplicasDataPoint(ts, int64(hpa.Spec.MaxReplicas))
mb.RecordK8sHpaMinReplicasDataPoint(ts, int64(*hpa.Spec.MinReplicas))
@@ -41,9 +28,3 @@ func GetMetadata(hpa *autoscalingv2.HorizontalPodAutoscaler) map[experimentalmet
experimentalmetricmetadata.ResourceID(hpa.UID): metadata.GetGenericMetadata(&hpa.ObjectMeta, "HPA"),
}
}

func GetMetadataBeta(hpa *autoscalingv2beta2.HorizontalPodAutoscaler) map[experimentalmetricmetadata.ResourceID]*metadata.KubernetesMetadata {
return map[experimentalmetricmetadata.ResourceID]*metadata.KubernetesMetadata{
experimentalmetricmetadata.ResourceID(hpa.UID): metadata.GetGenericMetadata(&hpa.ObjectMeta, "HPA"),
}
}
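As a migration reference, here is a minimal sketch, assuming the stable autoscaling/v2 client types, of an HPA object carrying the same spec and status fields the remaining RecordMetrics function reads (max, min, current, and desired replicas). The object name and values are illustrative only.

package main

import (
	"fmt"

	autoscalingv2 "k8s.io/api/autoscaling/v2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	minReplicas := int32(2)
	// A hypothetical HPA built from the stable autoscaling/v2 types only.
	hpa := &autoscalingv2.HorizontalPodAutoscaler{
		ObjectMeta: metav1.ObjectMeta{Name: "demo-hpa", Namespace: "default"},
		Spec: autoscalingv2.HorizontalPodAutoscalerSpec{
			MinReplicas: &minReplicas,
			MaxReplicas: 10,
		},
		Status: autoscalingv2.HorizontalPodAutoscalerStatus{
			CurrentReplicas: 5,
			DesiredReplicas: 7,
		},
	}

	// These are the four data points the receiver records for each HPA.
	fmt.Println("max:", hpa.Spec.MaxReplicas)
	fmt.Println("min:", *hpa.Spec.MinReplicas)
	fmt.Println("current:", hpa.Status.CurrentReplicas)
	fmt.Println("desired:", hpa.Status.DesiredReplicas)
}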
20 changes: 0 additions & 20 deletions receiver/k8sclusterreceiver/internal/testutils/objects.go
@@ -7,7 +7,6 @@ import (
quotav1 "github.com/openshift/api/quota/v1"
appsv1 "k8s.io/api/apps/v1"
autoscalingv2 "k8s.io/api/autoscaling/v2"
autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
@@ -34,25 +33,6 @@ func NewHPA(id string) *autoscalingv2.HorizontalPodAutoscaler {
}
}

func NewHPABeta(id string) *autoscalingv2beta2.HorizontalPodAutoscaler {
minReplicas := int32(2)
return &autoscalingv2beta2.HorizontalPodAutoscaler{
ObjectMeta: v1.ObjectMeta{
Name: "test-hpa-" + id,
Namespace: "test-namespace",
UID: types.UID("test-hpa-" + id + "-uid"),
},
Status: autoscalingv2beta2.HorizontalPodAutoscalerStatus{
CurrentReplicas: 5,
DesiredReplicas: 7,
},
Spec: autoscalingv2beta2.HorizontalPodAutoscalerSpec{
MinReplicas: &minReplicas,
MaxReplicas: 10,
},
}
}

func NewJob(id string) *batchv1.Job {
p := int32(2)
c := int32(10)
6 changes: 0 additions & 6 deletions receiver/k8sclusterreceiver/receiver_test.go
@@ -275,12 +275,6 @@ func newFakeClientWithAllResources() *fake.Clientset {
gvkToAPIResource(gvk.HorizontalPodAutoscaler),
},
},
{
GroupVersion: "autoscaling/v2beta2",
APIResources: []v1.APIResource{
gvkToAPIResource(gvk.HorizontalPodAutoscalerBeta),
},
},
}
return client
}
14 changes: 2 additions & 12 deletions receiver/k8sclusterreceiver/watcher.go
@@ -20,9 +20,7 @@ import (
"go.uber.org/zap/zapcore"
appsv1 "k8s.io/api/apps/v1"
autoscalingv2 "k8s.io/api/autoscaling/v2"
autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
batchv1 "k8s.io/api/batch/v1"
batchv1beta1 "k8s.io/api/batch/v1beta1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
@@ -138,8 +136,8 @@ func (rw *resourceWatcher) prepareSharedInformerFactory() error {
"ReplicaSet": {gvk.ReplicaSet},
"StatefulSet": {gvk.StatefulSet},
"Job": {gvk.Job},
"CronJob": {gvk.CronJob, gvk.CronJobBeta},
"HorizontalPodAutoscaler": {gvk.HorizontalPodAutoscaler, gvk.HorizontalPodAutoscalerBeta},
"CronJob": {gvk.CronJob},
"HorizontalPodAutoscaler": {gvk.HorizontalPodAutoscaler},
}

for kind, gvks := range supportedKinds {
@@ -214,12 +212,8 @@ func (rw *resourceWatcher) setupInformerForKind(kind schema.GroupVersionKind, fa
rw.setupInformer(kind, factory.Batch().V1().Jobs().Informer())
case gvk.CronJob:
rw.setupInformer(kind, factory.Batch().V1().CronJobs().Informer())
case gvk.CronJobBeta:
rw.setupInformer(kind, factory.Batch().V1beta1().CronJobs().Informer())
case gvk.HorizontalPodAutoscaler:
rw.setupInformer(kind, factory.Autoscaling().V2().HorizontalPodAutoscalers().Informer())
case gvk.HorizontalPodAutoscalerBeta:
rw.setupInformer(kind, factory.Autoscaling().V2beta2().HorizontalPodAutoscalers().Informer())
default:
rw.logger.Error("Could not setup an informer for provided group version kind",
zap.String("group version kind", kind.String()))
@@ -308,12 +302,8 @@ func (rw *resourceWatcher) objMetadata(obj interface{}) map[experimentalmetricme
return jobs.GetMetadata(o)
case *batchv1.CronJob:
return cronjob.GetMetadata(o)
case *batchv1beta1.CronJob:
return cronjob.GetMetadataBeta(o)
case *autoscalingv2.HorizontalPodAutoscaler:
return hpa.GetMetadata(o)
case *autoscalingv2beta2.HorizontalPodAutoscaler:
return hpa.GetMetadataBeta(o)
}
return nil
}
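For readers tracing the watcher changes, here is a minimal standalone sketch, assuming in-cluster credentials, of wiring a shared informer for the stable batch/v1 CronJob kind with client-go. This is the same factory path the watcher now uses exclusively once the Batch().V1beta1() and Autoscaling().V2beta2() branches are gone; the program below is illustrative and not code from this PR.

package main

import (
	"fmt"
	"time"

	batchv1 "k8s.io/api/batch/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// Assumes the process runs inside a cluster; use clientcmd for out-of-cluster configs.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	factory := informers.NewSharedInformerFactory(client, 10*time.Minute)
	// Stable API only; there is no v1beta1 fallback anymore.
	informer := factory.Batch().V1().CronJobs().Informer()
	informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			cj := obj.(*batchv1.CronJob)
			fmt.Println("observed CronJob:", cj.Namespace+"/"+cj.Name)
		},
	})

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	factory.WaitForCacheSync(stop)
	select {} // block so the informer keeps running in this sketch
}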
18 changes: 0 additions & 18 deletions receiver/k8sclusterreceiver/watcher_test.go
@@ -118,12 +118,6 @@ func TestIsKindSupported(t *testing.T) {
gvk: gvk.Pod,
expected: true,
},
{
name: "unsupported_kind",
client: fake.NewSimpleClientset(),
gvk: gvk.CronJobBeta,
expected: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@@ -178,24 +172,12 @@ func TestPrepareSharedInformerFactory(t *testing.T) {
gvkToAPIResource(gvk.Job),
},
},
{
GroupVersion: "batch/v1beta1",
APIResources: []metav1.APIResource{
gvkToAPIResource(gvk.CronJobBeta),
},
},
{
GroupVersion: "autoscaling/v2",
APIResources: []metav1.APIResource{
gvkToAPIResource(gvk.HorizontalPodAutoscaler),
},
},
{
GroupVersion: "autoscaling/v2beta2",
APIResources: []metav1.APIResource{
gvkToAPIResource(gvk.HorizontalPodAutoscalerBeta),
},
},
}
return client
}(),