diff --git a/.bingo/Variables.mk b/.bingo/Variables.mk index 494483658..28ccf20c6 100644 --- a/.bingo/Variables.mk +++ b/.bingo/Variables.mk @@ -47,12 +47,6 @@ $(CRD_REF_DOCS): $(BINGO_DIR)/crd-ref-docs.mod @echo "(re)installing $(GOBIN)/crd-ref-docs-v0.3.0" @cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=crd-ref-docs.mod -o=$(GOBIN)/crd-ref-docs-v0.3.0 "github.com/elastic/crd-ref-docs" -GOJQ := $(GOBIN)/gojq-v0.12.17 -$(GOJQ): $(BINGO_DIR)/gojq.mod - @# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies. - @echo "(re)installing $(GOBIN)/gojq-v0.12.17" - @cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=gojq.mod -o=$(GOBIN)/gojq-v0.12.17 "github.com/itchyny/gojq/cmd/gojq" - GOLANGCI_LINT := $(GOBIN)/golangci-lint-v2.8.0 $(GOLANGCI_LINT): $(BINGO_DIR)/golangci-lint.mod @# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies. diff --git a/.bingo/gojq.mod b/.bingo/gojq.mod deleted file mode 100644 index 004aae3b1..000000000 --- a/.bingo/gojq.mod +++ /dev/null @@ -1,5 +0,0 @@ -module _ // Auto generated by https://github.com/bwplotka/bingo. 
DO NOT EDIT - -go 1.24.4 - -require github.com/itchyny/gojq v0.12.17 // cmd/gojq diff --git a/.bingo/gojq.sum b/.bingo/gojq.sum deleted file mode 100644 index e87b5b0e3..000000000 --- a/.bingo/gojq.sum +++ /dev/null @@ -1,17 +0,0 @@ -github.com/itchyny/gojq v0.12.17 h1:8av8eGduDb5+rvEdaOO+zQUjA04MS0m3Ps8HiD+fceg= -github.com/itchyny/gojq v0.12.17/go.mod h1:WBrEMkgAfAGO1LUcGOckBl5O726KPp+OlkKug0I/FEY= -github.com/itchyny/timefmt-go v0.1.6 h1:ia3s54iciXDdzWzwaVKXZPbiXzxxnv1SPGFfM/myJ5Q= -github.com/itchyny/timefmt-go v0.1.6/go.mod h1:RRDZYC5s9ErkjQvTvvU7keJjxUYzIISJGxm9/mAERQg= -github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= -github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= -github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= -github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/.bingo/variables.env b/.bingo/variables.env index 783b778af..58417ccf8 100644 --- a/.bingo/variables.env +++ b/.bingo/variables.env @@ -18,8 +18,6 @@ CRD_DIFF="${GOBIN}/crd-diff-v0.5.1-0.20260309184313-54162f2e3097" CRD_REF_DOCS="${GOBIN}/crd-ref-docs-v0.3.0" -GOJQ="${GOBIN}/gojq-v0.12.17" - 
GOLANGCI_LINT="${GOBIN}/golangci-lint-v2.8.0" GORELEASER="${GOBIN}/goreleaser-v2.11.2" diff --git a/Makefile b/Makefile index c662d98ab..165c8f0ef 100644 --- a/Makefile +++ b/Makefile @@ -221,8 +221,8 @@ fmt: $(YAMLFMT) #EXHELP Formats code $(YAMLFMT) -gitignore_excludes testdata .PHONY: update-tls-profiles -update-tls-profiles: $(GOJQ) #EXHELP Update TLS profiles from the Mozilla wiki - env JQ=$(GOJQ) hack/tools/update-tls-profiles.sh +update-tls-profiles: #EXHELP Update TLS profiles from the Mozilla wiki + hack/tools/update-tls-profiles.sh .PHONY: update-registryv1-bundle-schema update-registryv1-bundle-schema: #EXHELP Update registry+v1 bundle configuration JSON schema diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES index 392e12db4..92a6969a0 100644 --- a/OWNERS_ALIASES +++ b/OWNERS_ALIASES @@ -10,9 +10,7 @@ aliases: - tmshort olmv1-reviewers: - - anik120 - ankitathomas - - bentito - camilamacedo86 - dtfranz - fgiudici @@ -22,7 +20,6 @@ aliases: - OchiengEd - perdasilva - rashmigottipati - - thetechnick - tmshort - trgeiger - pedjak diff --git a/api/v1/clusterobjectset_types.go b/api/v1/clusterobjectset_types.go index ed1a4001e..d7e755992 100644 --- a/api/v1/clusterobjectset_types.go +++ b/api/v1/clusterobjectset_types.go @@ -510,6 +510,39 @@ type ClusterObjectSetStatus struct { // +listMapKey=type // +optional Conditions []metav1.Condition `json:"conditions,omitempty"` + + // observedPhases records the content hashes of resolved phases + // at first successful reconciliation. This is used to detect if + // referenced object sources were deleted and recreated with + // different content. Each entry covers all fully-resolved object + // manifests within a phase, making it source-agnostic. 
+ // + // +kubebuilder:validation:XValidation:rule="self == oldSelf || oldSelf.size() == 0",message="observedPhases is immutable" + // +kubebuilder:validation:MaxItems=20 + // +listType=map + // +listMapKey=name + // +optional + ObservedPhases []ObservedPhase `json:"observedPhases,omitempty"` +} + +// ObservedPhase records the observed content digest of a resolved phase. +type ObservedPhase struct { + // name is the phase name matching a phase in spec.phases. + // + // +required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=63 + // +kubebuilder:validation:XValidation:rule=`!format.dns1123Label().validate(self).hasValue()`,message="the value must consist of only lowercase alphanumeric characters and hyphens, and must start with an alphabetic character and end with an alphanumeric character." + Name string `json:"name"` + + // digest is the digest of the phase's resolved object content + // at first successful resolution, in the format ":". + // + // +required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=256 + // +kubebuilder:validation:XValidation:rule=`self.matches('^[a-z0-9]+:[a-f0-9]+$')`,message="digest must be in the format ':'" + Digest string `json:"digest"` } // +genclient diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index 8d1cea024..384439787 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -555,6 +555,11 @@ func (in *ClusterObjectSetStatus) DeepCopyInto(out *ClusterObjectSetStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.ObservedPhases != nil { + in, out := &in.ObservedPhases, &out.ObservedPhases + *out = make([]ObservedPhase, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterObjectSetStatus. 
@@ -664,6 +669,21 @@ func (in *ObjectSourceRef) DeepCopy() *ObjectSourceRef { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObservedPhase) DeepCopyInto(out *ObservedPhase) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObservedPhase. +func (in *ObservedPhase) DeepCopy() *ObservedPhase { + if in == nil { + return nil + } + out := new(ObservedPhase) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PreflightConfig) DeepCopyInto(out *PreflightConfig) { *out = *in diff --git a/applyconfigurations/api/v1/clusterobjectsetstatus.go b/applyconfigurations/api/v1/clusterobjectsetstatus.go index 987dd9839..6203563de 100644 --- a/applyconfigurations/api/v1/clusterobjectsetstatus.go +++ b/applyconfigurations/api/v1/clusterobjectsetstatus.go @@ -46,6 +46,12 @@ type ClusterObjectSetStatusApplyConfiguration struct { // The Succeeded condition represents whether the revision has successfully completed its rollout: // - When status is True and reason is Succeeded, the ClusterObjectSet has successfully completed its rollout. This condition is set once and persists even if the revision later becomes unavailable. Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"` + // observedPhases records the content hashes of resolved phases + // at first successful reconciliation. This is used to detect if + // referenced object sources were deleted and recreated with + // different content. Each entry covers all fully-resolved object + // manifests within a phase, making it source-agnostic. 
+ ObservedPhases []ObservedPhaseApplyConfiguration `json:"observedPhases,omitempty"` } // ClusterObjectSetStatusApplyConfiguration constructs a declarative configuration of the ClusterObjectSetStatus type for use with @@ -66,3 +72,16 @@ func (b *ClusterObjectSetStatusApplyConfiguration) WithConditions(values ...*met } return b } + +// WithObservedPhases adds the given value to the ObservedPhases field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ObservedPhases field. +func (b *ClusterObjectSetStatusApplyConfiguration) WithObservedPhases(values ...*ObservedPhaseApplyConfiguration) *ClusterObjectSetStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithObservedPhases") + } + b.ObservedPhases = append(b.ObservedPhases, *values[i]) + } + return b +} diff --git a/applyconfigurations/api/v1/observedphase.go b/applyconfigurations/api/v1/observedphase.go new file mode 100644 index 000000000..75f5fb1d6 --- /dev/null +++ b/applyconfigurations/api/v1/observedphase.go @@ -0,0 +1,52 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by controller-gen-v0.20. DO NOT EDIT. + +package v1 + +// ObservedPhaseApplyConfiguration represents a declarative configuration of the ObservedPhase type for use +// with apply. 
+// +// ObservedPhase records the observed content digest of a resolved phase. +type ObservedPhaseApplyConfiguration struct { + // name is the phase name matching a phase in spec.phases. + Name *string `json:"name,omitempty"` + // digest is the digest of the phase's resolved object content + // at first successful resolution, in the format ":". + Digest *string `json:"digest,omitempty"` +} + +// ObservedPhaseApplyConfiguration constructs a declarative configuration of the ObservedPhase type for use with +// apply. +func ObservedPhase() *ObservedPhaseApplyConfiguration { + return &ObservedPhaseApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ObservedPhaseApplyConfiguration) WithName(value string) *ObservedPhaseApplyConfiguration { + b.Name = &value + return b +} + +// WithDigest sets the Digest field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Digest field is set to the value of the last call. 
+func (b *ObservedPhaseApplyConfiguration) WithDigest(value string) *ObservedPhaseApplyConfiguration { + b.Digest = &value + return b +} diff --git a/applyconfigurations/utils.go b/applyconfigurations/utils.go index 7b1373e4a..6a467f96a 100644 --- a/applyconfigurations/utils.go +++ b/applyconfigurations/utils.go @@ -83,6 +83,8 @@ func ForKind(kind schema.GroupVersionKind) interface{} { return &apiv1.ObjectSelectorApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("ObjectSourceRef"): return &apiv1.ObjectSourceRefApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ObservedPhase"): + return &apiv1.ObservedPhaseApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("PreflightConfig"): return &apiv1.PreflightConfigApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("ProgressionProbe"): diff --git a/docs/api-reference/crd-ref-docs-gen-config.yaml b/docs/api-reference/crd-ref-docs-gen-config.yaml index d7eb1e4e3..c42240a46 100644 --- a/docs/api-reference/crd-ref-docs-gen-config.yaml +++ b/docs/api-reference/crd-ref-docs-gen-config.yaml @@ -1,5 +1,5 @@ processor: - ignoreTypes: [ClusterObjectSet, ClusterObjectSetList] + ignoreTypes: [ClusterObjectSet, ClusterObjectSetList, ObservedPhase] ignoreFields: [] render: diff --git a/docs/draft/concepts/large-bundle-support.md b/docs/draft/concepts/large-bundle-support.md index 325aaacc1..dba87b982 100644 --- a/docs/draft/concepts/large-bundle-support.md +++ b/docs/draft/concepts/large-bundle-support.md @@ -142,14 +142,18 @@ Recommended conventions: 1. **Secret type**: Secrets should use the dedicated type `olm.operatorframework.io/object-data` to distinguish them from user-created Secrets and enable easy identification. The system always sets this type on - Secrets it creates. The reconciler does not enforce the type when resolving - refs — Secrets with any type are accepted — but producers should set it for - consistency. - -2. **Immutability**: Secrets should set `immutable: true`. 
Because COS phases - are immutable, the content backing a ref should not change after creation. - Mutable referenced Secrets are not rejected, but modifying them after the - COS is created leads to undefined behavior. + Secrets it creates. The reconciler does not enforce the Secret type when + resolving refs, but it does enforce that referenced Secrets have + `immutable: true` set and that their content has not changed since first + resolution. + +2. **Immutability**: Secrets must set `immutable: true`. The reconciler verifies + that all referenced Secrets have `immutable: true` set before proceeding. + Mutable referenced Secrets are rejected and reconciliation is blocked with + `Progressing=False, Reason=Blocked`. Additionally, the reconciler records + content hashes of the resolved phases on first successful reconciliation + and blocks reconciliation if the content changes (e.g., if a Secret is + deleted and recreated with the same name but different data). 3. **Owner references**: Referenced Secrets should carry an ownerReference to the COS so that Kubernetes garbage collection removes them when the COS is @@ -388,6 +392,14 @@ Key properties: ### COS reconciler behavior +Before resolving individual object refs, the reconciler verifies that all +referenced Secrets have `immutable: true` set. After successfully building +the phases (resolving all refs), the reconciler computes a per-phase content +digest and compares it against the digests recorded in `.status.observedPhases` +(if present). If any phase's content has changed, reconciliation is blocked +with `Progressing=False, Reason=Blocked`. On first successful build, phase +content digests are persisted to status for future comparisons. + When processing a COS phase: - For each object entry in the phase: - If `object` is set, use it directly (current behavior, unchanged). 
diff --git a/go.mod b/go.mod index fbdb616f6..baf907200 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( github.com/BurntSushi/toml v1.6.0 github.com/Masterminds/semver/v3 v3.4.0 github.com/blang/semver/v4 v4.0.0 - github.com/cert-manager/cert-manager v1.20.0 + github.com/cert-manager/cert-manager v1.20.1 github.com/containerd/containerd v1.7.30 github.com/cucumber/godog v0.15.1 github.com/evanphx/json-patch v5.9.11+incompatible @@ -20,9 +20,9 @@ require ( github.com/klauspost/compress v1.18.5 github.com/opencontainers/go-digest v1.0.0 github.com/opencontainers/image-spec v1.1.1 - github.com/operator-framework/api v0.41.0 + github.com/operator-framework/api v0.42.0 github.com/operator-framework/helm-operator-plugins v0.8.0 - github.com/operator-framework/operator-registry v1.64.0 + github.com/operator-framework/operator-registry v1.65.0 github.com/prometheus/client_golang v1.23.2 github.com/prometheus/common v0.67.5 github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 @@ -34,18 +34,18 @@ require ( golang.org/x/mod v0.34.0 golang.org/x/sync v0.20.0 golang.org/x/tools v0.43.0 - helm.sh/helm/v3 v3.20.1 + helm.sh/helm/v3 v3.20.2 k8s.io/api v0.35.3 k8s.io/apiextensions-apiserver v0.35.3 k8s.io/apimachinery v0.35.3 - k8s.io/apiserver v0.35.2 + k8s.io/apiserver v0.35.3 k8s.io/cli-runtime v0.35.1 k8s.io/client-go v1.5.2 - k8s.io/component-base v0.35.2 + k8s.io/component-base v0.35.3 k8s.io/klog/v2 v2.140.0 k8s.io/kubernetes v1.35.0 k8s.io/utils v0.0.0-20260319190234-28399d86e0b5 - pkg.package-operator.run/boxcutter v0.13.0 + pkg.package-operator.run/boxcutter v0.13.1 sigs.k8s.io/controller-runtime v0.23.3 sigs.k8s.io/controller-tools v0.20.1 sigs.k8s.io/crdify v0.5.1-0.20260309184313-54162f2e3097 @@ -96,7 +96,7 @@ require ( github.com/cyphar/filepath-securejoin v0.6.1 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/distribution/reference v0.6.0 // indirect - github.com/docker/cli v29.3.0+incompatible // indirect + 
github.com/docker/cli v29.3.1+incompatible // indirect github.com/docker/distribution v2.8.3+incompatible // indirect github.com/docker/docker v28.5.2+incompatible // indirect github.com/docker/docker-credential-helpers v0.9.5 // indirect @@ -107,7 +107,7 @@ require ( github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect github.com/fatih/color v1.18.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/fxamacker/cbor/v2 v2.9.0 // indirect + github.com/fxamacker/cbor/v2 v2.9.1 // indirect github.com/go-errors/errors v1.5.1 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect github.com/go-git/go-billy/v5 v5.8.0 // indirect @@ -165,7 +165,7 @@ require ( github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-runewidth v0.0.19 // indirect - github.com/mattn/go-sqlite3 v1.14.34 // indirect + github.com/mattn/go-sqlite3 v1.14.37 // indirect github.com/miekg/pkcs11 v1.1.2 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect @@ -215,12 +215,12 @@ require ( go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 // indirect - go.opentelemetry.io/otel v1.42.0 // indirect + go.opentelemetry.io/otel v1.43.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0 // indirect - go.opentelemetry.io/otel/metric v1.42.0 // indirect - go.opentelemetry.io/otel/sdk v1.40.0 // indirect - go.opentelemetry.io/otel/trace v1.42.0 // indirect + go.opentelemetry.io/otel/metric v1.43.0 // indirect + go.opentelemetry.io/otel/sdk v1.43.0 // indirect + go.opentelemetry.io/otel/trace v1.43.0 // indirect go.opentelemetry.io/proto/otlp v1.9.0 // indirect go.podman.io/common v0.67.0 // indirect 
go.podman.io/storage v1.62.0 // indirect @@ -236,7 +236,7 @@ require ( gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect google.golang.org/genproto v0.0.0-20260209200024-4cfbd4190f57 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20260217215200-42d3e9bedb6d // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20260319201613-d00831a3d3e7 // indirect google.golang.org/grpc v1.79.3 // indirect google.golang.org/protobuf v1.36.11 // indirect gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect diff --git a/go.sum b/go.sum index d1f115207..f8afd52bf 100644 --- a/go.sum +++ b/go.sum @@ -45,8 +45,8 @@ github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cert-manager/cert-manager v1.20.0 h1:czZamsFJ1YdKPhpv+SopJZN9t3W68vQBnFYUevSHGqY= -github.com/cert-manager/cert-manager v1.20.0/go.mod h1:3oparI7R5JyJMNXntYod9p6DucIZ84st7y/9kL6cbBE= +github.com/cert-manager/cert-manager v1.20.1 h1:99ExHJu5TPp1V92AvvE4oY6BkOSyJiWLxxMkbqbdGaY= +github.com/cert-manager/cert-manager v1.20.1/go.mod h1:ut67FnggYJJqAdDWLhSPnj10P06QwbNU88RYNh9MvMc= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chai2010/gettext-go v1.0.3 h1:9liNh8t+u26xl5ddmWLmsOsdNLwkdRTg5AG+JnTiM80= @@ -112,8 +112,8 @@ github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5Qvfr github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dlclark/regexp2 v1.11.0 
h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI= github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= -github.com/docker/cli v29.3.0+incompatible h1:z3iWveU7h19Pqx7alZES8j+IeFQZ1lhTwb2F+V9SVvk= -github.com/docker/cli v29.3.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v29.3.1+incompatible h1:M04FDj2TRehDacrosh7Vlkgc7AuQoWloQkf1PA5hmoI= +github.com/docker/cli v29.3.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v28.5.2+incompatible h1:DBX0Y0zAjZbSrm1uzOkdr1onVghKaftjlSWt4AFexzM= @@ -151,8 +151,8 @@ github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7z github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= -github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= -github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= +github.com/fxamacker/cbor/v2 v2.9.1 h1:2rWm8B193Ll4VdjsJY28jxs70IdDsHRWgQYAI80+rMQ= +github.com/fxamacker/cbor/v2 v2.9.1/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk= github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= @@ -353,8 +353,8 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D github.com/mattn/go-runewidth v0.0.19 
h1:v++JhqYnZuu5jSKrk9RbgF5v4CGUjqRfBm05byFGLdw= github.com/mattn/go-runewidth v0.0.19/go.mod h1:XBkDxAl56ILZc9knddidhrOlY5R/pDhgLpndooCuJAs= github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= -github.com/mattn/go-sqlite3 v1.14.34 h1:3NtcvcUnFBPsuRcno8pUtupspG/GM+9nZ88zgJcp6Zk= -github.com/mattn/go-sqlite3 v1.14.34/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/mattn/go-sqlite3 v1.14.37 h1:3DOZp4cXis1cUIpCfXLtmlGolNLp2VEqhiB/PARNBIg= +github.com/mattn/go-sqlite3 v1.14.37/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/miekg/dns v1.1.72 h1:vhmr+TF2A3tuoGNkLDFK9zi36F2LS+hKTRW0Uf8kbzI= github.com/miekg/dns v1.1.72/go.mod h1:+EuEPhdHOsfk6Wk5TT2CzssZdqkmFhf8r+aVyDEToIs= github.com/miekg/pkcs11 v1.1.2 h1:/VxmeAX5qU6Q3EwafypogwWbYryHFmF2RpkJmw3m4MQ= @@ -407,14 +407,14 @@ github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJw github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= github.com/opencontainers/runtime-spec v1.3.0 h1:YZupQUdctfhpZy3TM39nN9Ika5CBWT5diQ8ibYCRkxg= github.com/opencontainers/runtime-spec v1.3.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/operator-framework/api v0.41.0 h1:B0nutndl95elbLXJGRlkFNTI8OuZIjSqvTEeORPhTKo= -github.com/operator-framework/api v0.41.0/go.mod h1:Ouud+eqruzll9X3iv8wuAOTNAyyEncYXp4IVgbIlIdg= +github.com/operator-framework/api v0.42.0 h1:rkc5V3zW8RxZMjePAe12jdL7Co/hwsYo1pLnkkhuR7s= +github.com/operator-framework/api v0.42.0/go.mod h1:bMEj+wl/8tGqcGNtxt38cLUYagu9chNsbYzb/5HQaUQ= github.com/operator-framework/helm-operator-plugins v0.8.0 h1:0f6HOQC5likkf0b/OvGvw7nhDb6h8Cj5twdCNjwNzMc= github.com/operator-framework/helm-operator-plugins v0.8.0/go.mod h1:Sc+8bE38xTCgCChBUvtq/PxatEg9fAypr7S5iAw8nlA= github.com/operator-framework/operator-lib v0.19.0 h1:az6ogYj21rtU0SF9uYctRLyKp2dtlqTsmpfehFy6Ce8= github.com/operator-framework/operator-lib v0.19.0/go.mod 
h1:KxycAjFnHt0DBtHmH3Jm7yHcY5sdrshPKTqM/HKAQ08= -github.com/operator-framework/operator-registry v1.64.0 h1:aEz8ERg+p9U50LCk2Lda4kuhFrSnqDu37yLFs0ex0qE= -github.com/operator-framework/operator-registry v1.64.0/go.mod h1:6PjoJsunu617xp7sGMxLGLFb6xvnmz6NHi+cUGH8oOY= +github.com/operator-framework/operator-registry v1.65.0 h1:SBNp8qPRWHwMVk4WBBWeQ5pOAq/fBusacIn5zVH03rk= +github.com/operator-framework/operator-registry v1.65.0/go.mod h1:psQG9kYyTR+2RNHgdwTvGjSbebmk9/Lzk3jS3kI36bc= github.com/otiai10/copy v1.14.1 h1:5/7E6qsUMBaH5AnQ0sSLzzTg1oTECmcCmT6lvF45Na8= github.com/otiai10/copy v1.14.1/go.mod h1:oQwrEDDOci3IM8dJF0d8+jnbfPDllW6vUjNc3DoZm9I= github.com/otiai10/mint v1.6.3 h1:87qsV/aw1F5as1eH1zS/yqHY85ANKVMgkDrf9rcxbQs= @@ -533,8 +533,8 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.6 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0/go.mod h1:fvPi2qXDqFs8M4B4fmJhE92TyQs9Ydjlg3RvfUp+NbQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 h1:7iP2uCb7sGddAr30RRS6xjKy7AZ2JtTOPA3oolgVSw8= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0/go.mod h1:c7hN3ddxs/z6q9xwvfLPk+UHlWRQyaeR1LdgfL/66l0= -go.opentelemetry.io/otel v1.42.0 h1:lSQGzTgVR3+sgJDAU/7/ZMjN9Z+vUip7leaqBKy4sho= -go.opentelemetry.io/otel v1.42.0/go.mod h1:lJNsdRMxCUIWuMlVJWzecSMuNjE7dOYyWlqOXWkdqCc= +go.opentelemetry.io/otel v1.43.0 h1:mYIM03dnh5zfN7HautFE4ieIig9amkNANT+xcVxAj9I= +go.opentelemetry.io/otel v1.43.0/go.mod h1:JuG+u74mvjvcm8vj8pI5XiHy1zDeoCS2LB1spIq7Ay0= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.16.0 h1:ZVg+kCXxd9LtAaQNKBxAvJ5NpMf7LpvEr4MIZqb0TMQ= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.16.0/go.mod h1:hh0tMeZ75CCXrHd9OXRYxTlCAdxcXioWHFIpYw2rZu8= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.16.0 h1:djrxvDxAe44mJUrKataUbOhCKhR3F8QCyWucO16hTQs= @@ -559,16 +559,16 @@ go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.40.0 
h1:MzfofMZN8ulNqob go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.40.0/go.mod h1:E73G9UFtKRXrxhBsHtG00TB5WxX57lpsQzogDkqBTz8= go.opentelemetry.io/otel/log v0.16.0 h1:DeuBPqCi6pQwtCK0pO4fvMB5eBq6sNxEnuTs88pjsN4= go.opentelemetry.io/otel/log v0.16.0/go.mod h1:rWsmqNVTLIA8UnwYVOItjyEZDbKIkMxdQunsIhpUMes= -go.opentelemetry.io/otel/metric v1.42.0 h1:2jXG+3oZLNXEPfNmnpxKDeZsFI5o4J+nz6xUlaFdF/4= -go.opentelemetry.io/otel/metric v1.42.0/go.mod h1:RlUN/7vTU7Ao/diDkEpQpnz3/92J9ko05BIwxYa2SSI= -go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= -go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= +go.opentelemetry.io/otel/metric v1.43.0 h1:d7638QeInOnuwOONPp4JAOGfbCEpYb+K6DVWvdxGzgM= +go.opentelemetry.io/otel/metric v1.43.0/go.mod h1:RDnPtIxvqlgO8GRW18W6Z/4P462ldprJtfxHxyKd2PY= +go.opentelemetry.io/otel/sdk v1.43.0 h1:pi5mE86i5rTeLXqoF/hhiBtUNcrAGHLKQdhg4h4V9Dg= +go.opentelemetry.io/otel/sdk v1.43.0/go.mod h1:P+IkVU3iWukmiit/Yf9AWvpyRDlUeBaRg6Y+C58QHzg= go.opentelemetry.io/otel/sdk/log v0.16.0 h1:e/b4bdlQwC5fnGtG3dlXUrNOnP7c8YLVSpSfEBIkTnI= go.opentelemetry.io/otel/sdk/log v0.16.0/go.mod h1:JKfP3T6ycy7QEuv3Hj8oKDy7KItrEkus8XJE6EoSzw4= -go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= -go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= -go.opentelemetry.io/otel/trace v1.42.0 h1:OUCgIPt+mzOnaUTpOQcBiM/PLQ/Op7oq6g4LenLmOYY= -go.opentelemetry.io/otel/trace v1.42.0/go.mod h1:f3K9S+IFqnumBkKhRJMeaZeNk9epyhnCmQh/EysQCdc= +go.opentelemetry.io/otel/sdk/metric v1.43.0 h1:S88dyqXjJkuBNLeMcVPRFXpRw2fuwdvfCGLEo89fDkw= +go.opentelemetry.io/otel/sdk/metric v1.43.0/go.mod h1:C/RJtwSEJ5hzTiUz5pXF1kILHStzb9zFlIEe85bhj6A= +go.opentelemetry.io/otel/trace v1.43.0 h1:BkNrHpup+4k4w+ZZ86CZoHHEkohws8AY+WTX09nk+3A= +go.opentelemetry.io/otel/trace v1.43.0/go.mod h1:/QJhyVBUUswCphDVxq+8mld+AvhXZLhe+8WVFxiFff0= 
go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A= go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4= go.podman.io/common v0.67.0 h1:6Ci5oU1ek08OAxBLkHEqSyWmjNh5zf03PRqZ04cPdwU= @@ -724,8 +724,8 @@ google.golang.org/genproto v0.0.0-20260209200024-4cfbd4190f57 h1:uZSB/r2MjH9Isqp google.golang.org/genproto v0.0.0-20260209200024-4cfbd4190f57/go.mod h1:nGuPfp0lnDJcJD0J47StV0Skgnw3qMSQhjsLKiejq5Y= google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57 h1:JLQynH/LBHfCTSbDWl+py8C+Rg/k1OVH3xfcaiANuF0= google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57/go.mod h1:kSJwQxqmFXeo79zOmbrALdflXQeAYcUbgS7PbpMknCY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20260217215200-42d3e9bedb6d h1:t/LOSXPJ9R0B6fnZNyALBRfZBH0Uy0gT+uR+SJ6syqQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20260217215200-42d3e9bedb6d/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260319201613-d00831a3d3e7 h1:ndE4FoJqsIceKP2oYSnUZqhTdYufCYYkqwtFzfrhI7w= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260319201613-d00831a3d3e7/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= @@ -762,8 +762,8 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= -helm.sh/helm/v3 v3.20.1 h1:T8PodUaH1UwNvE+imUA2mIKjJItY8g7CVvLVP5g4NzI= -helm.sh/helm/v3 v3.20.1/go.mod h1:Fl1kBaWCpkUrM6IYXPjQ3bdZQfFrogKArqptvueZ6Ww= +helm.sh/helm/v3 
v3.20.2 h1:binM4rvPx5DcNsa1sIt7UZi55lRbu3pZUFmQkSoRh48= +helm.sh/helm/v3 v3.20.2/go.mod h1:Fl1kBaWCpkUrM6IYXPjQ3bdZQfFrogKArqptvueZ6Ww= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/api v0.35.0 h1:iBAU5LTyBI9vw3L5glmat1njFK34srdLmktWwLTprlY= @@ -796,8 +796,8 @@ k8s.io/utils v0.0.0-20260319190234-28399d86e0b5 h1:kBawHLSnx/mYHmRnNUf9d4CpjREbe k8s.io/utils v0.0.0-20260319190234-28399d86e0b5/go.mod h1:xDxuJ0whA3d0I4mf/C4ppKHxXynQ+fxnkmQH0vTHnuk= oras.land/oras-go/v2 v2.6.0 h1:X4ELRsiGkrbeox69+9tzTu492FMUu7zJQW6eJU+I2oc= oras.land/oras-go/v2 v2.6.0/go.mod h1:magiQDfG6H1O9APp+rOsvCPcW1GD2MM7vgnKY0Y+u1o= -pkg.package-operator.run/boxcutter v0.13.0 h1:LNUS36NFkI+6J1CcVMrmTOtZt8UoalwXcIDlTPG66C4= -pkg.package-operator.run/boxcutter v0.13.0/go.mod h1:Bo1tgiXCYJtehp5p+2aTrKt7VnJhrGKSSBvUQofnDRg= +pkg.package-operator.run/boxcutter v0.13.1 h1:FvrSBnHWuf6Co+HPyxRSw2Y5mHSkUtteW8klXS6L8gk= +pkg.package-operator.run/boxcutter v0.13.1/go.mod h1:rR2jd32uNt2eml7UvlYNCpCAYIpTALDzHxaftMfgkZo= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.34.0 h1:hSfpvjjTQXQY2Fol2CS0QHMNs/WI1MOSGzCm1KhM5ec= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.34.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= sigs.k8s.io/controller-runtime v0.23.3 h1:VjB/vhoPoA9l1kEKZHBMnQF33tdCLQKJtydy4iqwZ80= diff --git a/hack/test/e2e-coverage.sh b/hack/test/e2e-coverage.sh index 49c8db3d7..e4b4610d4 100755 --- a/hack/test/e2e-coverage.sh +++ b/hack/test/e2e-coverage.sh @@ -22,8 +22,9 @@ rm -rf ${COVERAGE_DIR} && mkdir -p ${COVERAGE_DIR} kubectl -n "$OPERATOR_CONTROLLER_NAMESPACE" scale deployment/"$OPERATOR_CONTROLLER_MANAGER_DEPLOYMENT_NAME" --replicas=0 kubectl -n "$CATALOGD_NAMESPACE" scale deployment/"$CATALOGD_MANAGER_DEPLOYMENT_NAME" --replicas=0 -# Wait for the copy pod to be ready -kubectl -n 
"$OPERATOR_CONTROLLER_NAMESPACE" wait --for=condition=ready pod "$COPY_POD_NAME" +# Wait for manager pods to terminate so coverage data is flushed to the PVC +kubectl -n "$OPERATOR_CONTROLLER_NAMESPACE" wait --for=delete pods -l control-plane="$OPERATOR_CONTROLLER_MANAGER_DEPLOYMENT_NAME" --timeout=60s +kubectl -n "$CATALOGD_NAMESPACE" wait --for=delete pods -l control-plane="$CATALOGD_MANAGER_DEPLOYMENT_NAME" --timeout=60s # Copy the coverage data from the temporary pod kubectl -n "$OPERATOR_CONTROLLER_NAMESPACE" cp "$COPY_POD_NAME":/e2e-coverage/ "$COVERAGE_DIR" diff --git a/hack/tools/update-tls-profiles.sh b/hack/tools/update-tls-profiles.sh index 01618a318..10a3e270c 100755 --- a/hack/tools/update-tls-profiles.sh +++ b/hack/tools/update-tls-profiles.sh @@ -2,131 +2,10 @@ set -e -if [ -z "${JQ}" ]; then - echo "JQ not defined" - exit 1 -fi - -OUTPUT=internal/shared/util/tlsprofiles/mozilla_data.go +OUTPUT=internal/shared/util/tlsprofiles/mozilla_data.json INPUT=https://ssl-config.mozilla.org/guidelines/latest.json -TMPFILE="$(mktemp)" -trap 'rm -rf "$TMPFILE"' EXIT - -if ! curl -L -s -f "${INPUT}" > "${TMPFILE}"; then +if ! curl -L -s -f "${INPUT}" -o "${OUTPUT}"; then echo "ERROR: Failed to download ${INPUT} (HTTP error or connection failure)" >&2 exit 1 fi - -if ! ${JQ} empty "${TMPFILE}" 2>/dev/null; then - echo "ERROR: Downloaded data from ${INPUT} is not valid JSON" >&2 - exit 1 -fi - -# Extract stored version from current output file (may be empty on first run) -STORED_VERSION=$(grep '^// DATA VERSION:' "${OUTPUT}" 2>/dev/null | awk '{print $4}' || true) - -# Extract version from downloaded JSON and fail early if missing -NEW_VERSION=$(${JQ} -r '.version' "${TMPFILE}") -if [ -z "${NEW_VERSION}" ] || [ "${NEW_VERSION}" = "null" ]; then - echo "ERROR: Could not read .version from ${INPUT}" >&2 - exit 1 -fi - -if [ "${NEW_VERSION}" = "${STORED_VERSION}" ]; then - echo "Mozilla TLS data is already at version ${NEW_VERSION}, skipping regeneration." 
- exit 0 -fi -echo "Updating Mozilla TLS data from version ${STORED_VERSION:-unknown} to ${NEW_VERSION}" - -cat > "${OUTPUT}" <&2 - echo "Available profiles: $(${JQ} -r '.configurations | keys | join(", ")' "${TMPFILE}")" >&2 - exit 1 - fi - - # Validate tls_versions is a non-empty array with a non-null first entry - if ! ${JQ} -e ".configurations.${profile}.tls_versions | type == \"array\" and length > 0 and .[0] != null" "${TMPFILE}" >/dev/null; then - echo "ERROR: Missing or empty .configurations.${profile}.tls_versions[0] in ${INPUT}" >&2 - exit 1 - fi - - # Validate that at least one cipher is present across ciphersuites and ciphers.iana - # (modern has only ciphersuites; intermediate has both; either alone is valid) - local cipher_count - cipher_count=$(${JQ} -r " - [ - (.configurations.${profile}.ciphersuites // []), - (.configurations.${profile}.ciphers.iana // []) - ] | add | length" "${TMPFILE}") - if [ "${cipher_count}" -eq 0 ] 2>/dev/null; then - echo "ERROR: Profile '${profile}' has no ciphers in ciphersuites or ciphers.iana" >&2 - exit 1 - fi - - # Validate tls_curves is non-empty - local curve_count - curve_count=$(${JQ} -r ".configurations.${profile}.tls_curves | length" "${TMPFILE}") - if [ "${curve_count}" -eq 0 ] 2>/dev/null; then - echo "ERROR: Profile '${profile}' has no entries in tls_curves" >&2 - exit 1 - fi - - cat >> "${OUTPUT}" <> "${OUTPUT}" - ${JQ} -r "(.configurations.${profile}.ciphers.iana // [])[] | . |= \"tls.\" + . + \",\"" "${TMPFILE}" >> "${OUTPUT}" - - cat >> "${OUTPUT}" <> "${OUTPUT}" - - version=$(${JQ} -r ".configurations.${profile}.tls_versions[0]" "${TMPFILE}") - version=${version/TLSv1./tls.VersionTLS1} - version=${version/TLSv1/tls.VersionTLS10} - - cat >> "${OUTPUT}" <:". + maxLength: 256 + minLength: 1 + type: string + x-kubernetes-validations: + - message: digest must be in the format ':' + rule: self.matches('^[a-z0-9]+:[a-f0-9]+$') + name: + description: name is the phase name matching a phase in spec.phases. 
+ maxLength: 63 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the value must consist of only lowercase alphanumeric + characters and hyphens, and must start with an alphabetic + character and end with an alphanumeric character. + rule: '!format.dns1123Label().validate(self).hasValue()' + required: + - digest + - name + type: object + maxItems: 20 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + x-kubernetes-validations: + - message: observedPhases is immutable + rule: self == oldSelf || oldSelf.size() == 0 type: object type: object served: true diff --git a/internal/operator-controller/applier/phase.go b/internal/operator-controller/applier/phase.go index a0a85e915..dd5d68887 100644 --- a/internal/operator-controller/applier/phase.go +++ b/internal/operator-controller/applier/phase.go @@ -112,10 +112,10 @@ var ( PhaseInfrastructure: { {Kind: "Service"}, {Kind: "Issuer", Group: "cert-manager.io"}, + {Kind: "Certificate", Group: "cert-manager.io"}, }, PhaseDeploy: { - {Kind: "Certificate", Group: "cert-manager.io"}, {Kind: "Deployment", Group: "apps"}, }, diff --git a/internal/operator-controller/applier/phase_test.go b/internal/operator-controller/applier/phase_test.go index 2a4ade865..aba36d12f 100644 --- a/internal/operator-controller/applier/phase_test.go +++ b/internal/operator-controller/applier/phase_test.go @@ -160,6 +160,17 @@ func Test_PhaseSort(t *testing.T) { }, }, }, + { + Object: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "cert-manager.io/v1", + "kind": "Certificate", + "metadata": map[string]interface{}{ + "name": "test", + }, + }, + }, + }, { Object: &unstructured.Unstructured{ Object: map[string]interface{}{ @@ -309,6 +320,17 @@ func Test_PhaseSort(t *testing.T) { }, }, }, + { + Object: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "cert-manager.io/v1", + "kind": "Certificate", + "metadata": map[string]interface{}{ + "name": 
"test", + }, + }, + }, + }, }, }, { @@ -449,7 +471,7 @@ func Test_PhaseSort(t *testing.T) { want: []*ocv1ac.ClusterObjectSetPhaseApplyConfiguration{}, }, { - name: "sort by group within same phase", + name: "sort by group across infrastructure and deploy phases", objs: []ocv1ac.ClusterObjectSetObjectApplyConfiguration{ { Object: &unstructured.Unstructured{ @@ -476,24 +498,29 @@ func Test_PhaseSort(t *testing.T) { }, want: []*ocv1ac.ClusterObjectSetPhaseApplyConfiguration{ { - Name: ptr.To(string(applier.PhaseDeploy)), + Name: ptr.To(string(applier.PhaseInfrastructure)), Objects: []ocv1ac.ClusterObjectSetObjectApplyConfiguration{ { Object: &unstructured.Unstructured{ Object: map[string]interface{}{ - "apiVersion": "apps/v1", - "kind": "Deployment", + "apiVersion": "cert-manager.io/v1", + "kind": "Certificate", "metadata": map[string]interface{}{ "name": "test", }, }, }, }, + }, + }, + { + Name: ptr.To(string(applier.PhaseDeploy)), + Objects: []ocv1ac.ClusterObjectSetObjectApplyConfiguration{ { Object: &unstructured.Unstructured{ Object: map[string]interface{}{ - "apiVersion": "cert-manager.io/v1", - "kind": "Certificate", + "apiVersion": "apps/v1", + "kind": "Deployment", "metadata": map[string]interface{}{ "name": "test", }, diff --git a/internal/operator-controller/controllers/clusterobjectset_controller.go b/internal/operator-controller/controllers/clusterobjectset_controller.go index 055f962ce..2d81b71c9 100644 --- a/internal/operator-controller/controllers/clusterobjectset_controller.go +++ b/internal/operator-controller/controllers/clusterobjectset_controller.go @@ -6,6 +6,7 @@ import ( "bytes" "compress/gzip" "context" + "crypto/sha256" "encoding/json" "errors" "fmt" @@ -15,6 +16,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -144,16 +146,24 @@ 
func (c *ClusterObjectSetReconciler) reconcile(ctx context.Context, cos *ocv1.Cl return c.delete(ctx, cos) } - phases, opts, err := c.buildBoxcutterPhases(ctx, cos) + if err := c.verifyReferencedSecretsImmutable(ctx, cos); err != nil { + l.Error(err, "referenced Secret verification failed, blocking reconciliation") + markAsNotProgressing(cos, ocv1.ClusterObjectSetReasonBlocked, err.Error()) + return ctrl.Result{}, nil + } + + phases, currentPhases, opts, err := c.buildBoxcutterPhases(ctx, cos) if err != nil { setRetryingConditions(cos, err.Error()) return ctrl.Result{}, fmt.Errorf("converting to boxcutter revision: %v", err) } - siblings, err := c.siblingRevisionNames(ctx, cos) - if err != nil { - setRetryingConditions(cos, err.Error()) - return ctrl.Result{}, fmt.Errorf("listing sibling revisions: %v", err) + if len(cos.Status.ObservedPhases) == 0 { + cos.Status.ObservedPhases = currentPhases + } else if err := verifyObservedPhases(cos.Status.ObservedPhases, currentPhases); err != nil { + l.Error(err, "resolved phases content changed, blocking reconciliation") + markAsNotProgressing(cos, ocv1.ClusterObjectSetReasonBlocked, err.Error()) + return ctrl.Result{}, nil } revisionEngine, err := c.RevisionEngineFactory.CreateRevisionEngine(ctx, cos) @@ -218,11 +228,6 @@ func (c *ClusterObjectSetReconciler) reconcile(ctx context.Context, cos *ocv1.Cl if ores.Action() == machinery.ActionCollision { collidingObjs = append(collidingObjs, ores.String()) } - if ores.Action() == machinery.ActionProgressed && siblings != nil { - if ref := foreignRevisionController(ores.Object(), siblings); ref != nil { - collidingObjs = append(collidingObjs, ores.String()+fmt.Sprintf("\nConflicting Owner: %s", ref.String())) - } - } } if len(collidingObjs) > 0 { @@ -473,10 +478,10 @@ func (c *ClusterObjectSetReconciler) listPreviousRevisions(ctx context.Context, return previous, nil } -func (c *ClusterObjectSetReconciler) buildBoxcutterPhases(ctx context.Context, cos *ocv1.ClusterObjectSet) 
([]boxcutter.Phase, []boxcutter.RevisionReconcileOption, error) { +func (c *ClusterObjectSetReconciler) buildBoxcutterPhases(ctx context.Context, cos *ocv1.ClusterObjectSet) ([]boxcutter.Phase, []ocv1.ObservedPhase, []boxcutter.RevisionReconcileOption, error) { previous, err := c.listPreviousRevisions(ctx, cos) if err != nil { - return nil, nil, fmt.Errorf("listing previous revisions: %w", err) + return nil, nil, nil, fmt.Errorf("listing previous revisions: %w", err) } // Convert to []client.Object for boxcutter @@ -487,17 +492,19 @@ func (c *ClusterObjectSetReconciler) buildBoxcutterPhases(ctx context.Context, c progressionProbes, err := buildProgressionProbes(cos.Spec.ProgressionProbes) if err != nil { - return nil, nil, err + return nil, nil, nil, err } opts := []boxcutter.RevisionReconcileOption{ boxcutter.WithPreviousOwners(previousObjs), boxcutter.WithProbe(boxcutter.ProgressProbeType, progressionProbes), + boxcutter.WithAggregatePhaseReconcileErrors(), } - phases := make([]boxcutter.Phase, 0) + phases := make([]boxcutter.Phase, 0, len(cos.Spec.Phases)) + observedPhases := make([]ocv1.ObservedPhase, 0, len(cos.Spec.Phases)) for _, specPhase := range cos.Spec.Phases { - objs := make([]client.Object, 0) + objs := make([]client.Object, 0, len(specPhase.Objects)) for _, specObj := range specPhase.Objects { var obj *unstructured.Unstructured switch { @@ -506,13 +513,25 @@ func (c *ClusterObjectSetReconciler) buildBoxcutterPhases(ctx context.Context, c case specObj.Ref.Name != "": resolved, err := c.resolveObjectRef(ctx, specObj.Ref) if err != nil { - return nil, nil, fmt.Errorf("resolving ref in phase %q: %w", specPhase.Name, err) + return nil, nil, nil, fmt.Errorf("resolving ref in phase %q: %w", specPhase.Name, err) } obj = resolved default: - return nil, nil, fmt.Errorf("object in phase %q has neither object nor ref", specPhase.Name) + return nil, nil, nil, fmt.Errorf("object in phase %q has neither object nor ref", specPhase.Name) } + objs = append(objs, obj) 
+ } + + // Compute digest from the user-provided objects before controller mutations. + digest, err := computePhaseDigest(specPhase.Name, objs) + if err != nil { + return nil, nil, nil, fmt.Errorf("computing phase digest: %w", err) + } + observedPhases = append(observedPhases, ocv1.ObservedPhase{Name: specPhase.Name, Digest: digest}) + + // Apply controller mutations after digest computation. + for i, obj := range objs { objLabels := obj.GetLabels() if objLabels == nil { objLabels = map[string]string{} @@ -520,17 +539,16 @@ func (c *ClusterObjectSetReconciler) buildBoxcutterPhases(ctx context.Context, c objLabels[labels.OwnerNameKey] = cos.Labels[labels.OwnerNameKey] obj.SetLabels(objLabels) - switch cp := EffectiveCollisionProtection(cos.Spec.CollisionProtection, specPhase.CollisionProtection, specObj.CollisionProtection); cp { + switch cp := EffectiveCollisionProtection(cos.Spec.CollisionProtection, specPhase.CollisionProtection, specPhase.Objects[i].CollisionProtection); cp { case ocv1.CollisionProtectionIfNoController, ocv1.CollisionProtectionNone: opts = append(opts, boxcutter.WithObjectReconcileOptions( obj, boxcutter.WithCollisionProtection(cp))) } - - objs = append(objs, obj) } + phases = append(phases, boxcutter.NewPhase(specPhase.Name, objs)) } - return phases, opts, nil + return phases, observedPhases, opts, nil } // resolveObjectRef fetches the referenced Secret, reads the value at the specified key, @@ -586,42 +604,6 @@ func EffectiveCollisionProtection(cp ...ocv1.CollisionProtection) ocv1.Collision return ecp } -// siblingRevisionNames returns the names of all ClusterObjectSets that belong to -// the same ClusterExtension as cos. Returns nil when cos has no owner label. 
-func (c *ClusterObjectSetReconciler) siblingRevisionNames(ctx context.Context, cos *ocv1.ClusterObjectSet) (sets.Set[string], error) { - ownerLabel, ok := cos.Labels[labels.OwnerNameKey] - if !ok { - return nil, nil - } - revList := &ocv1.ClusterObjectSetList{} - if err := c.TrackingCache.List(ctx, revList, client.MatchingLabels{ - labels.OwnerNameKey: ownerLabel, - }); err != nil { - return nil, fmt.Errorf("listing sibling revisions: %w", err) - } - names := sets.New[string]() - for i := range revList.Items { - names.Insert(revList.Items[i].Name) - } - return names, nil -} - -// foreignRevisionController returns the controller OwnerReference when obj is owned by a -// ClusterObjectSet that is not in siblings (i.e. belongs to a different ClusterExtension). -// Returns nil when the controller is a sibling or is not a ClusterObjectSet. -func foreignRevisionController(obj metav1.Object, siblings sets.Set[string]) *metav1.OwnerReference { - refs := obj.GetOwnerReferences() - for i := range refs { - if refs[i].Controller != nil && *refs[i].Controller && - refs[i].Kind == ocv1.ClusterObjectSetKind && - refs[i].APIVersion == ocv1.GroupVersion.String() && - !siblings.Has(refs[i].Name) { - return &refs[i] - } - } - return nil -} - // buildProgressionProbes creates a set of boxcutter probes from the fields provided in the COS's spec.progressionProbes. // Returns nil and an error if encountered while attempting to build the probes. func buildProgressionProbes(progressionProbes []ocv1.ProgressionProbe) (probing.And, error) { @@ -739,3 +721,99 @@ func markAsArchived(cos *ocv1.ClusterObjectSet) bool { updated := markAsNotProgressing(cos, ocv1.ClusterObjectSetReasonArchived, msg) return markAsAvailableUnknown(cos, ocv1.ClusterObjectSetReasonArchived, msg) || updated } + +// computePhaseDigest computes a deterministic SHA-256 digest of a phase's +// resolved content (name + objects) before any controller mutations. 
+// JSON serialization of unstructured objects produces a canonical encoding +// with sorted map keys. +func computePhaseDigest(name string, objects []client.Object) (string, error) { + phaseMap := map[string]any{ + "name": name, + "objects": objects, + } + data, err := json.Marshal(phaseMap) + if err != nil { + return "", fmt.Errorf("marshaling phase %q: %w", name, err) + } + h := sha256.Sum256(data) + return fmt.Sprintf("sha256:%x", h), nil +} + +// verifyObservedPhases compares current per-phase digests against stored +// digests. Returns an error listing all mismatched phases. +func verifyObservedPhases(stored, current []ocv1.ObservedPhase) error { + if len(stored) == 0 { + return fmt.Errorf("stored observedPhases is unexpectedly empty") + } + if len(stored) != len(current) { + return fmt.Errorf("number of phases has changed (expected %d phases, got %d)", len(stored), len(current)) + } + storedMap := make(map[string]string, len(stored)) + for _, s := range stored { + storedMap[s.Name] = s.Digest + } + var mismatches []string + for _, c := range current { + if prev, ok := storedMap[c.Name]; ok && prev != c.Digest { + mismatches = append(mismatches, fmt.Sprintf( + "phase %q (expected digest %s, got %s)", c.Name, prev, c.Digest)) + } + } + if len(mismatches) > 0 { + return fmt.Errorf( + "resolved content of %d phase(s) has changed: %s; "+ + "a referenced object source may have been deleted and recreated with different content", + len(mismatches), strings.Join(mismatches, "; ")) + } + return nil +} + +// verifyReferencedSecretsImmutable checks that all referenced Secrets +// have Immutable set to true. It collects all violations and returns +// a single error listing every misconfigured Secret. 
+func (c *ClusterObjectSetReconciler) verifyReferencedSecretsImmutable(ctx context.Context, cos *ocv1.ClusterObjectSet) error { + type secretRef struct { + name string + namespace string + } + seen := make(map[secretRef]struct{}) + var refs []secretRef + + for _, phase := range cos.Spec.Phases { + for _, obj := range phase.Objects { + if obj.Ref.Name == "" { + continue + } + sr := secretRef{name: obj.Ref.Name, namespace: obj.Ref.Namespace} + if _, ok := seen[sr]; !ok { + seen[sr] = struct{}{} + refs = append(refs, sr) + } + } + } + + var mutableSecrets []string + for _, ref := range refs { + secret := &corev1.Secret{} + key := client.ObjectKey{Name: ref.name, Namespace: ref.namespace} + if err := c.Client.Get(ctx, key, secret); err != nil { + if apierrors.IsNotFound(err) { + // Secret not yet available — skip verification. + // resolveObjectRef will handle the not-found with a retryable error. + continue + } + return fmt.Errorf("getting Secret %s/%s: %w", ref.namespace, ref.name, err) + } + + if secret.Immutable == nil || !*secret.Immutable { + mutableSecrets = append(mutableSecrets, fmt.Sprintf("%s/%s", ref.namespace, ref.name)) + } + } + + if len(mutableSecrets) > 0 { + return fmt.Errorf("the following secrets are not immutable (referenced secrets must have immutable set to true): %s", + strings.Join(mutableSecrets, ", ")) + } + + return nil +} diff --git a/internal/operator-controller/controllers/clusterobjectset_controller_internal_test.go b/internal/operator-controller/controllers/clusterobjectset_controller_internal_test.go index 15300f63c..654de3388 100644 --- a/internal/operator-controller/controllers/clusterobjectset_controller_internal_test.go +++ b/internal/operator-controller/controllers/clusterobjectset_controller_internal_test.go @@ -2,17 +2,25 @@ package controllers import ( "context" + "fmt" + "strings" + "sync/atomic" "testing" "time" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/client/interceptor" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/predicate" @@ -235,3 +243,264 @@ func (m *mockTrackingCacheInternal) Watch(ctx context.Context, user client.Objec func (m *mockTrackingCacheInternal) Source(h handler.EventHandler, predicates ...predicate.Predicate) source.Source { return nil } + +func TestComputePhaseDigest(t *testing.T) { + makeObj := func(apiVersion, kind, name string) *unstructured.Unstructured { + return &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": apiVersion, + "kind": kind, + "metadata": map[string]interface{}{"name": name}, + }, + } + } + + t.Run("deterministic for same content", func(t *testing.T) { + objs := []client.Object{makeObj("v1", "ConfigMap", "cm1")} + hash, err := computePhaseDigest("deploy", objs) + require.NoError(t, err) + assert.Equal(t, "sha256:e159e3f2c46b65df156d02407c44936c0fd7349149a89dadf190d27c67019edc", hash) + }) + + t.Run("deterministic with many objects", func(t *testing.T) { + objs := make([]client.Object, 100) + for i := range objs { + objs[i] = makeObj("v1", "ConfigMap", fmt.Sprintf("cm-%d", i)) + } + var first string + for i := range 100 { + digest, err := computePhaseDigest("deploy", objs) + require.NoError(t, err) + if i == 0 { + first = digest + } else { + assert.Equal(t, first, digest, "digest changed on iteration %d", i) + } + } + }) + + t.Run("different for different object content", func(t *testing.T) { + h1, err := computePhaseDigest("deploy", []client.Object{makeObj("v1", 
"ConfigMap", "cm1")}) + require.NoError(t, err) + h2, err := computePhaseDigest("deploy", []client.Object{makeObj("v1", "ConfigMap", "cm2")}) + require.NoError(t, err) + assert.NotEqual(t, h1, h2) + }) + + t.Run("different for different phase names", func(t *testing.T) { + objs := []client.Object{makeObj("v1", "ConfigMap", "cm1")} + h1, err := computePhaseDigest("deploy", objs) + require.NoError(t, err) + h2, err := computePhaseDigest("crds", objs) + require.NoError(t, err) + assert.NotEqual(t, h1, h2) + }) + + t.Run("different order produces different digest", func(t *testing.T) { + obj1 := makeObj("v1", "ConfigMap", "cm1") + obj2 := makeObj("v1", "ConfigMap", "cm2") + h1, err := computePhaseDigest("deploy", []client.Object{obj1, obj2}) + require.NoError(t, err) + h2, err := computePhaseDigest("deploy", []client.Object{obj2, obj1}) + require.NoError(t, err) + assert.NotEqual(t, h1, h2) + }) + + t.Run("empty phase produces valid digest", func(t *testing.T) { + hash, err := computePhaseDigest("empty", nil) + require.NoError(t, err) + assert.NotEmpty(t, hash) + }) + + t.Run("digest has sha256 prefix", func(t *testing.T) { + digest, err := computePhaseDigest("deploy", []client.Object{makeObj("v1", "ConfigMap", "cm1")}) + require.NoError(t, err) + assert.True(t, strings.HasPrefix(digest, "sha256:"), "digest should start with sha256: prefix, got %s", digest) + }) +} + +func TestVerifyObservedPhases(t *testing.T) { + t.Run("passes when digests match", func(t *testing.T) { + stored := []ocv1.ObservedPhase{{Name: "deploy", Digest: "sha256:abc123"}} + current := []ocv1.ObservedPhase{{Name: "deploy", Digest: "sha256:abc123"}} + assert.NoError(t, verifyObservedPhases(stored, current)) + }) + + t.Run("fails when digest changes", func(t *testing.T) { + stored := []ocv1.ObservedPhase{{Name: "deploy", Digest: "sha256:abc123"}} + current := []ocv1.ObservedPhase{{Name: "deploy", Digest: "sha256:def456"}} + err := verifyObservedPhases(stored, current) + require.Error(t, err) + 
assert.Contains(t, err.Error(), `resolved content of 1 phase(s) has changed`) + assert.Contains(t, err.Error(), `phase "deploy"`) + }) + + t.Run("reports all mismatched phases", func(t *testing.T) { + stored := []ocv1.ObservedPhase{ + {Name: "deploy", Digest: "sha256:aaa"}, + {Name: "crds", Digest: "sha256:bbb"}, + {Name: "rbac", Digest: "sha256:ccc"}, + } + current := []ocv1.ObservedPhase{ + {Name: "deploy", Digest: "sha256:xxx"}, + {Name: "crds", Digest: "sha256:yyy"}, + {Name: "rbac", Digest: "sha256:ccc"}, + } + err := verifyObservedPhases(stored, current) + require.Error(t, err) + assert.Contains(t, err.Error(), `resolved content of 2 phase(s) has changed`) + assert.Contains(t, err.Error(), `phase "deploy"`) + assert.Contains(t, err.Error(), `phase "crds"`) + assert.NotContains(t, err.Error(), `phase "rbac"`) + }) + + t.Run("fails when phase count changes", func(t *testing.T) { + stored := []ocv1.ObservedPhase{{Name: "deploy", Digest: "sha256:abc123"}} + current := []ocv1.ObservedPhase{ + {Name: "deploy", Digest: "sha256:abc123"}, + {Name: "crds", Digest: "sha256:def456"}, + } + err := verifyObservedPhases(stored, current) + require.Error(t, err) + assert.Contains(t, err.Error(), "number of phases has changed") + }) + + t.Run("fails with empty stored", func(t *testing.T) { + current := []ocv1.ObservedPhase{{Name: "deploy", Digest: "sha256:abc123"}} + err := verifyObservedPhases(nil, current) + require.Error(t, err) + assert.Contains(t, err.Error(), "unexpectedly empty") + }) +} + +func TestVerifyReferencedSecretsImmutable(t *testing.T) { + testScheme := runtime.NewScheme() + require.NoError(t, ocv1.AddToScheme(testScheme)) + require.NoError(t, corev1.AddToScheme(testScheme)) + + immutableSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-secret", + Namespace: "test-ns", + }, + Immutable: ptr.To(true), + Data: map[string][]byte{ + "obj": []byte(`{"apiVersion":"v1","kind":"ConfigMap"}`), + }, + } + + t.Run("succeeds when all secrets are 
immutable", func(t *testing.T) { + testClient := fake.NewClientBuilder(). + WithScheme(testScheme). + WithObjects(immutableSecret.DeepCopy()). + Build() + + reconciler := &ClusterObjectSetReconciler{Client: testClient} + + cos := &ocv1.ClusterObjectSet{ + Spec: ocv1.ClusterObjectSetSpec{ + Phases: []ocv1.ClusterObjectSetPhase{{ + Name: "phase1", + Objects: []ocv1.ClusterObjectSetObject{{ + Ref: ocv1.ObjectSourceRef{ + Name: "test-secret", + Namespace: "test-ns", + Key: "obj", + }, + }}, + }}, + }, + } + + err := reconciler.verifyReferencedSecretsImmutable(t.Context(), cos) + require.NoError(t, err) + }) + + t.Run("rejects non-immutable secret", func(t *testing.T) { + mutableSecret := immutableSecret.DeepCopy() + mutableSecret.Immutable = nil + + testClient := fake.NewClientBuilder(). + WithScheme(testScheme). + WithObjects(mutableSecret). + Build() + + reconciler := &ClusterObjectSetReconciler{Client: testClient} + + cos := &ocv1.ClusterObjectSet{ + Spec: ocv1.ClusterObjectSetSpec{ + Phases: []ocv1.ClusterObjectSetPhase{{ + Name: "phase1", + Objects: []ocv1.ClusterObjectSetObject{{ + Ref: ocv1.ObjectSourceRef{ + Name: "test-secret", + Namespace: "test-ns", + Key: "obj", + }, + }}, + }}, + }, + } + + err := reconciler.verifyReferencedSecretsImmutable(t.Context(), cos) + require.Error(t, err) + assert.Contains(t, err.Error(), "not immutable") + }) + + t.Run("skips phases with inline objects only", func(t *testing.T) { + testClient := fake.NewClientBuilder(). + WithScheme(testScheme). 
+ Build() + + reconciler := &ClusterObjectSetReconciler{Client: testClient} + + cos := &ocv1.ClusterObjectSet{ + Spec: ocv1.ClusterObjectSetSpec{ + Phases: []ocv1.ClusterObjectSetPhase{{ + Name: "phase1", + Objects: []ocv1.ClusterObjectSetObject{{ + // Inline object, no ref + }}, + }}, + }, + } + + err := reconciler.verifyReferencedSecretsImmutable(t.Context(), cos) + require.NoError(t, err) + }) + + t.Run("checks secret only once when referenced multiple times", func(t *testing.T) { + var secretGetCount atomic.Int32 + secret := immutableSecret.DeepCopy() + testClient := fake.NewClientBuilder(). + WithScheme(testScheme). + WithObjects(secret). + WithInterceptorFuncs(interceptor.Funcs{ + Get: func(ctx context.Context, c client.WithWatch, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + if _, ok := obj.(*corev1.Secret); ok { + secretGetCount.Add(1) + } + return c.Get(ctx, key, obj, opts...) + }, + }). + Build() + + reconciler := &ClusterObjectSetReconciler{Client: testClient} + + cos := &ocv1.ClusterObjectSet{ + Spec: ocv1.ClusterObjectSetSpec{ + Phases: []ocv1.ClusterObjectSetPhase{{ + Name: "phase1", + Objects: []ocv1.ClusterObjectSetObject{ + {Ref: ocv1.ObjectSourceRef{Name: "test-secret", Namespace: "test-ns", Key: "obj"}}, + {Ref: ocv1.ObjectSourceRef{Name: "test-secret", Namespace: "test-ns", Key: "obj2"}}, + }, + }}, + }, + } + + err := reconciler.verifyReferencedSecretsImmutable(t.Context(), cos) + require.NoError(t, err) + assert.Equal(t, int32(1), secretGetCount.Load(), "secret should be fetched only once despite multiple references") + }) +} diff --git a/internal/operator-controller/controllers/clusterobjectset_controller_test.go b/internal/operator-controller/controllers/clusterobjectset_controller_test.go index ccfb9755e..b3b10f575 100644 --- a/internal/operator-controller/controllers/clusterobjectset_controller_test.go +++ b/internal/operator-controller/controllers/clusterobjectset_controller_test.go @@ -1355,7 +1355,7 @@ 
func Test_ClusterObjectSetReconciler_Reconcile_ForeignRevisionCollision(t *testi expectCollision bool }{ { - name: "progressed object owned by a foreign COS is treated as a collision", + name: "collision object owned by a foreign COS is detected", reconcilingRevisionName: "ext-B-1", existingObjs: func() []client.Object { extA := &ocv1.ClusterExtension{ @@ -1390,7 +1390,7 @@ func Test_ClusterObjectSetReconciler_Reconcile_ForeignRevisionCollision(t *testi name: "everything", objects: []machinery.ObjectResult{ mockObjectResult{ - action: machinery.ActionProgressed, + action: machinery.ActionCollision, object: &unstructured.Unstructured{ Object: map[string]interface{}{ "apiVersion": "apiextensions.k8s.io/v1", @@ -1570,7 +1570,6 @@ func Test_ClusterObjectSetReconciler_Reconcile_ForeignRevisionCollision(t *testi require.Equal(t, metav1.ConditionTrue, cond.Status) require.Equal(t, ocv1.ClusterObjectSetReasonRetrying, cond.Reason) require.Contains(t, cond.Message, "revision object collisions") - require.Contains(t, cond.Message, "Conflicting Owner") } else { require.Equal(t, ctrl.Result{}, result) require.NoError(t, err) diff --git a/internal/operator-controller/controllers/resolve_ref_test.go b/internal/operator-controller/controllers/resolve_ref_test.go index da3465ca7..53e0df2c9 100644 --- a/internal/operator-controller/controllers/resolve_ref_test.go +++ b/internal/operator-controller/controllers/resolve_ref_test.go @@ -14,6 +14,7 @@ import ( apimachineryruntime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" clocktesting "k8s.io/utils/clock/testing" + "k8s.io/utils/ptr" "pkg.package-operator.run/boxcutter/machinery" machinerytypes "pkg.package-operator.run/boxcutter/machinery/types" ctrl "sigs.k8s.io/controller-runtime" @@ -51,6 +52,7 @@ func TestResolveObjectRef_PlainJSON(t *testing.T) { Name: "test-secret", Namespace: "olmv1-system", }, + Immutable: ptr.To(true), Data: map[string][]byte{ "my-key": cmData, }, @@ -112,6 +114,7 @@ func 
TestResolveObjectRef_GzipCompressed(t *testing.T) { Name: "test-secret-gz", Namespace: "olmv1-system", }, + Immutable: ptr.To(true), Data: map[string][]byte{ "my-key": buf.Bytes(), }, @@ -184,6 +187,7 @@ func TestResolveObjectRef_KeyNotFound(t *testing.T) { Name: "test-secret-nokey", Namespace: "olmv1-system", }, + Immutable: ptr.To(true), Data: map[string][]byte{ "other-key": []byte("{}"), }, @@ -223,6 +227,7 @@ func TestResolveObjectRef_InvalidJSON(t *testing.T) { Name: "test-secret-invalid", Namespace: "olmv1-system", }, + Immutable: ptr.To(true), Data: map[string][]byte{ "my-key": []byte("not-valid-json"), }, diff --git a/internal/operator-controller/controllers/revision_engine_factory.go b/internal/operator-controller/controllers/revision_engine_factory.go index b63536d70..9311e8641 100644 --- a/internal/operator-controller/controllers/revision_engine_factory.go +++ b/internal/operator-controller/controllers/revision_engine_factory.go @@ -69,7 +69,7 @@ func (f *defaultRevisionEngineFactory) CreateRevisionEngine(_ context.Context, r machinery.NewObjectEngine( f.Scheme, f.TrackingCache, scopedClient, machinery.NewComparator(f.DiscoveryClient, f.Scheme, f.FieldOwnerPrefix), - f.FieldOwnerPrefix, f.FieldOwnerPrefix, + f.FieldOwnerPrefix, f.FieldOwnerPrefix, scopedClient, ), validation.NewClusterPhaseValidator(f.RESTMapper, scopedClient), ), diff --git a/internal/shared/util/tlsprofiles/mozilla_data.go b/internal/shared/util/tlsprofiles/mozilla_data.go index 43789b7e8..8b513172b 100644 --- a/internal/shared/util/tlsprofiles/mozilla_data.go +++ b/internal/shared/util/tlsprofiles/mozilla_data.go @@ -1,53 +1,104 @@ package tlsprofiles -// DO NOT EDIT, GENERATED BY hack/tools/update-tls-profiles.sh -// DATA SOURCE: https://ssl-config.mozilla.org/guidelines/latest.json -// DATA VERSION: 6 +// This file embeds the Mozilla SSL/TLS Configuration Guidelines JSON and parses +// it at init() time to populate the modern and intermediate TLS profiles. 
+// Run `make update-tls-profiles` to refresh mozilla_data.json from the upstream spec. import ( "crypto/tls" + _ "embed" + "encoding/json" + "fmt" ) -var modernTLSProfile = tlsProfile{ - ciphers: cipherSlice{ - cipherNums: []uint16{ - tls.TLS_AES_128_GCM_SHA256, - tls.TLS_AES_256_GCM_SHA384, - tls.TLS_CHACHA20_POLY1305_SHA256, - }, - }, - curves: curveSlice{ - curveNums: []tls.CurveID{ - X25519MLKEM768, - X25519, - prime256v1, - secp384r1, - }, - }, - minTLSVersion: tls.VersionTLS13, +//go:embed mozilla_data.json +var mozillaDataJSON []byte + +// skippedCiphers records cipher names from mozilla_data.json that are not +// supported by Go's crypto/tls and were omitted from the profiles. +var skippedCiphers []string + +// skippedCurves records curve names from mozilla_data.json that are not +// supported by Go's crypto/tls and were omitted from the profiles. +var skippedCurves []string + +var ( + modernTLSProfile tlsProfile + intermediateTLSProfile tlsProfile +) + +type mozillaConfiguration struct { + Ciphersuites []string `json:"ciphersuites"` + Ciphers struct { + IANA []string `json:"iana"` + } `json:"ciphers"` + TLSCurves []string `json:"tls_curves"` + TLSVersions []string `json:"tls_versions"` +} + +type mozillaSpec struct { + Configurations map[string]mozillaConfiguration `json:"configurations"` +} + +func init() { + var spec mozillaSpec + if err := json.Unmarshal(mozillaDataJSON, &spec); err != nil { + panic(fmt.Sprintf("tlsprofiles: failed to parse embedded mozilla_data.json: %v", err)) + } + + for _, name := range []string{"modern", "intermediate"} { + cfg, ok := spec.Configurations[name] + if !ok { + panic(fmt.Sprintf("tlsprofiles: profile %q not found in embedded mozilla_data.json", name)) + } + + p, ciphers, curves := parseProfile(name, cfg) + skippedCiphers = append(skippedCiphers, ciphers...) + skippedCurves = append(skippedCurves, curves...) 
+ + switch name { + case "modern": + modernTLSProfile = p + case "intermediate": + intermediateTLSProfile = p + } + } } -var intermediateTLSProfile = tlsProfile{ - ciphers: cipherSlice{ - cipherNums: []uint16{ - tls.TLS_AES_128_GCM_SHA256, - tls.TLS_AES_256_GCM_SHA384, - tls.TLS_CHACHA20_POLY1305_SHA256, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, - tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, - }, - }, - curves: curveSlice{ - curveNums: []tls.CurveID{ - X25519MLKEM768, - X25519, - prime256v1, - secp384r1, - }, - }, - minTLSVersion: tls.VersionTLS12, +func parseProfile(name string, cfg mozillaConfiguration) (tlsProfile, []string, []string) { + var skippedC, skippedK []string + var cipherNums []uint16 + for _, c := range append(cfg.Ciphersuites, cfg.Ciphers.IANA...) { + id := cipherSuiteId(c) + if id == 0 { + skippedC = append(skippedC, c) + continue + } + cipherNums = append(cipherNums, id) + } + + var curveNums []tls.CurveID + for _, c := range cfg.TLSCurves { + id := curveId(c) + if id == 0 { + skippedK = append(skippedK, c) + continue + } + curveNums = append(curveNums, id) + } + + if len(cfg.TLSVersions) == 0 { + panic(fmt.Sprintf("tlsprofiles: profile %q has no tls_versions in embedded mozilla_data.json", name)) + } + + var version tlsVersion + if err := version.Set(cfg.TLSVersions[0]); err != nil { + panic(fmt.Sprintf("tlsprofiles: profile %q has unrecognized tls_versions[0] %q: %v", name, cfg.TLSVersions[0], err)) + } + + return tlsProfile{ + ciphers: cipherSlice{cipherNums: cipherNums}, + curves: curveSlice{curveNums: curveNums}, + minTLSVersion: version, + }, skippedC, skippedK } diff --git a/internal/shared/util/tlsprofiles/mozilla_data.json b/internal/shared/util/tlsprofiles/mozilla_data.json new file mode 100644 index 000000000..eee17a0c5 --- /dev/null +++ 
b/internal/shared/util/tlsprofiles/mozilla_data.json @@ -0,0 +1,70 @@ +{ + "version": 6.0, + "href": "https://ssl-config.mozilla.org/guidelines/6.0.json", + "configurations": { + "modern": { + "certificate_curves": ["prime256v1", "secp384r1"], + "certificate_signatures": ["ecdsa-with-SHA256", "ecdsa-with-SHA384", "ecdsa-with-SHA512"], + "certificate_types": ["ecdsa"], + "ciphers": { + "iana": [], + "openssl": [] + }, + "ciphersuites": [ + "TLS_AES_128_GCM_SHA256", + "TLS_AES_256_GCM_SHA384", + "TLS_CHACHA20_POLY1305_SHA256" + ], + "dh_param_size": null, + "ecdh_param_size": 256, + "hsts_min_age": 63072000, + "maximum_certificate_lifespan": 90, + "ocsp_staple": true, + "oldest_clients": ["Firefox 63", "Android 10.0", "Chrome 70", "Edge 75", "Java 11", "OpenSSL 1.1.1", "Opera 57", "Safari 12.1"], + "recommended_certificate_lifespan": 90, + "rsa_key_size": null, + "server_preferred_order": false, + "tls_curves": ["X25519MLKEM768", "X25519", "prime256v1", "secp384r1"], + "tls_versions": ["TLSv1.3"] + }, + "intermediate": { + "certificate_curves": ["prime256v1", "secp384r1"], + "certificate_signatures": ["sha256WithRSAEncryption", "ecdsa-with-SHA256", "ecdsa-with-SHA384", "ecdsa-with-SHA512"], + "certificate_types": ["ecdsa", "rsa"], + "ciphers": { + "iana": [ + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256" + ], + "openssl": [ + "ECDHE-ECDSA-AES128-GCM-SHA256", + "ECDHE-RSA-AES128-GCM-SHA256", + "ECDHE-ECDSA-AES256-GCM-SHA384", + "ECDHE-RSA-AES256-GCM-SHA384", + "ECDHE-ECDSA-CHACHA20-POLY1305", + "ECDHE-RSA-CHACHA20-POLY1305" + ] + }, + "ciphersuites": [ + "TLS_AES_128_GCM_SHA256", + "TLS_AES_256_GCM_SHA384", + "TLS_CHACHA20_POLY1305_SHA256" + ], + "dh_param_size": 2048, + "ecdh_param_size": 256, + "hsts_min_age": 63072000, + 
"maximum_certificate_lifespan": 366, + "ocsp_staple": true, + "oldest_clients": ["Firefox 31.3.0", "Android 4.4.2", "Chrome 49", "Edge 15 on Windows 10", "IE 11 on Windows 10", "Java 8u161", "OpenSSL 1.0.1l", "Opera 20", "Safari 9"], + "recommended_certificate_lifespan": 90, + "rsa_key_size": 2048, + "server_preferred_order": false, + "tls_curves": ["X25519MLKEM768", "X25519", "prime256v1", "secp384r1"], + "tls_versions": ["TLSv1.2", "TLSv1.3"] + } + } +} diff --git a/internal/shared/util/tlsprofiles/tlsprofiles_test.go b/internal/shared/util/tlsprofiles/tlsprofiles_test.go index e77984283..9c8075f4b 100644 --- a/internal/shared/util/tlsprofiles/tlsprofiles_test.go +++ b/internal/shared/util/tlsprofiles/tlsprofiles_test.go @@ -166,3 +166,21 @@ func TestProfilesMapCompleteness(t *testing.T) { _, err := findTLSProfile(tlsProfileName("does-not-exist")) require.Error(t, err, "looking up a non-existent profile must return an error") } + +// TestNoSkippedCiphers verifies that all ciphers in mozilla_data.json are +// supported by Go's crypto/tls. If this fails, the JSON contains a cipher that +// needs to be handled — either it has been added to Go's crypto/tls, or it must +// be explicitly excluded from the profiles. +func TestNoSkippedCiphers(t *testing.T) { + require.Empty(t, skippedCiphers, + "cipher(s) in mozilla_data.json are not supported by Go's crypto/tls and were omitted: %v", skippedCiphers) +} + +// TestNoSkippedCurves verifies that all curves in mozilla_data.json are +// supported by Go's crypto/tls. If this fails, the JSON contains a curve that +// needs to be handled — either it has been added to Go's crypto/tls, or it must +// be explicitly excluded from the profiles. 
+func TestNoSkippedCurves(t *testing.T) { + require.Empty(t, skippedCurves, + "curve(s) in mozilla_data.json are not supported by Go's crypto/tls and were omitted: %v", skippedCurves) +} diff --git a/manifests/experimental-e2e.yaml b/manifests/experimental-e2e.yaml index 4a27cf205..f52b31a39 100644 --- a/manifests/experimental-e2e.yaml +++ b/manifests/experimental-e2e.yaml @@ -1945,6 +1945,49 @@ spec: x-kubernetes-list-map-keys: - type x-kubernetes-list-type: map + observedPhases: + description: |- + observedPhases records the content hashes of resolved phases + at first successful reconciliation. This is used to detect if + referenced object sources were deleted and recreated with + different content. Each entry covers all fully-resolved object + manifests within a phase, making it source-agnostic. + items: + description: ObservedPhase records the observed content digest of + a resolved phase. + properties: + digest: + description: |- + digest is the digest of the phase's resolved object content + at first successful resolution, in the format ":". + maxLength: 256 + minLength: 1 + type: string + x-kubernetes-validations: + - message: digest must be in the format ':' + rule: self.matches('^[a-z0-9]+:[a-f0-9]+$') + name: + description: name is the phase name matching a phase in spec.phases. + maxLength: 63 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the value must consist of only lowercase alphanumeric + characters and hyphens, and must start with an alphabetic + character and end with an alphanumeric character. 
+ rule: '!format.dns1123Label().validate(self).hasValue()' + required: + - digest + - name + type: object + maxItems: 20 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + x-kubernetes-validations: + - message: observedPhases is immutable + rule: self == oldSelf || oldSelf.size() == 0 type: object type: object served: true diff --git a/manifests/experimental.yaml b/manifests/experimental.yaml index 9a8fa0b40..b8e48ba2f 100644 --- a/manifests/experimental.yaml +++ b/manifests/experimental.yaml @@ -1906,6 +1906,49 @@ spec: x-kubernetes-list-map-keys: - type x-kubernetes-list-type: map + observedPhases: + description: |- + observedPhases records the content hashes of resolved phases + at first successful reconciliation. This is used to detect if + referenced object sources were deleted and recreated with + different content. Each entry covers all fully-resolved object + manifests within a phase, making it source-agnostic. + items: + description: ObservedPhase records the observed content digest of + a resolved phase. + properties: + digest: + description: |- + digest is the digest of the phase's resolved object content + at first successful resolution, in the format ":". + maxLength: 256 + minLength: 1 + type: string + x-kubernetes-validations: + - message: digest must be in the format ':' + rule: self.matches('^[a-z0-9]+:[a-f0-9]+$') + name: + description: name is the phase name matching a phase in spec.phases. + maxLength: 63 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the value must consist of only lowercase alphanumeric + characters and hyphens, and must start with an alphabetic + character and end with an alphanumeric character. 
+ rule: '!format.dns1123Label().validate(self).hasValue()' + required: + - digest + - name + type: object + maxItems: 20 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + x-kubernetes-validations: + - message: observedPhases is immutable + rule: self == oldSelf || oldSelf.size() == 0 type: object type: object served: true diff --git a/openshift/tests-extension/go.mod b/openshift/tests-extension/go.mod index 7cdc62377..bee36a11d 100644 --- a/openshift/tests-extension/go.mod +++ b/openshift/tests-extension/go.mod @@ -20,7 +20,7 @@ require ( k8s.io/api v0.35.3 k8s.io/apiextensions-apiserver v0.35.3 k8s.io/apimachinery v0.35.3 - k8s.io/apiserver v0.35.2 + k8s.io/apiserver v0.35.3 k8s.io/client-go v1.5.2 k8s.io/kubernetes v1.35.0 k8s.io/utils v0.0.0-20260319190234-28399d86e0b5 @@ -38,7 +38,7 @@ require ( github.com/emicklei/go-restful/v3 v3.13.0 // indirect github.com/evanphx/json-patch/v5 v5.9.11 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/fxamacker/cbor/v2 v2.9.0 // indirect + github.com/fxamacker/cbor/v2 v2.9.1 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/jsonpointer v0.22.5 // indirect @@ -84,12 +84,12 @@ require ( github.com/x448/float16 v0.8.4 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 // indirect - go.opentelemetry.io/otel v1.42.0 // indirect + go.opentelemetry.io/otel v1.43.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0 // indirect - go.opentelemetry.io/otel/metric v1.42.0 // indirect - go.opentelemetry.io/otel/sdk v1.40.0 // indirect - go.opentelemetry.io/otel/trace v1.42.0 // indirect + go.opentelemetry.io/otel/metric v1.43.0 // indirect + go.opentelemetry.io/otel/sdk v1.43.0 // indirect + go.opentelemetry.io/otel/trace v1.43.0 // 
indirect go.opentelemetry.io/proto/otlp v1.9.0 // indirect go.yaml.in/yaml/v2 v2.4.4 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect @@ -104,13 +104,13 @@ require ( golang.org/x/time v0.15.0 // indirect golang.org/x/tools v0.43.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20260217215200-42d3e9bedb6d // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20260319201613-d00831a3d3e7 // indirect google.golang.org/grpc v1.79.3 // indirect google.golang.org/protobuf v1.36.11 // indirect gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - k8s.io/component-base v0.35.2 // indirect + k8s.io/component-base v0.35.3 // indirect k8s.io/component-helpers v0.35.0 // indirect k8s.io/controller-manager v0.33.2 // indirect k8s.io/klog/v2 v2.140.0 // indirect diff --git a/openshift/tests-extension/go.sum b/openshift/tests-extension/go.sum index 2a93000c4..ef1ffbeec 100644 --- a/openshift/tests-extension/go.sum +++ b/openshift/tests-extension/go.sum @@ -25,8 +25,8 @@ github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjT github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= -github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= +github.com/fxamacker/cbor/v2 v2.9.1 h1:2rWm8B193Ll4VdjsJY28jxs70IdDsHRWgQYAI80+rMQ= +github.com/fxamacker/cbor/v2 v2.9.1/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew= github.com/ghodss/yaml 
v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -200,20 +200,20 @@ go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 h1:7iP2uCb7sGddAr30RRS6xjKy7AZ2JtTOPA3oolgVSw8= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0/go.mod h1:c7hN3ddxs/z6q9xwvfLPk+UHlWRQyaeR1LdgfL/66l0= -go.opentelemetry.io/otel v1.42.0 h1:lSQGzTgVR3+sgJDAU/7/ZMjN9Z+vUip7leaqBKy4sho= -go.opentelemetry.io/otel v1.42.0/go.mod h1:lJNsdRMxCUIWuMlVJWzecSMuNjE7dOYyWlqOXWkdqCc= +go.opentelemetry.io/otel v1.43.0 h1:mYIM03dnh5zfN7HautFE4ieIig9amkNANT+xcVxAj9I= +go.opentelemetry.io/otel v1.43.0/go.mod h1:JuG+u74mvjvcm8vj8pI5XiHy1zDeoCS2LB1spIq7Ay0= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 h1:QKdN8ly8zEMrByybbQgv8cWBcdAarwmIPZ6FThrWXJs= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0/go.mod h1:bTdK1nhqF76qiPoCCdyFIV+N/sRHYXYCTQc+3VCi3MI= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0 h1:DvJDOPmSWQHWywQS6lKL+pb8s3gBLOZUtw4N+mavW1I= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0/go.mod h1:EtekO9DEJb4/jRyN4v4Qjc2yA7AtfCBuz2FynRUWTXs= -go.opentelemetry.io/otel/metric v1.42.0 h1:2jXG+3oZLNXEPfNmnpxKDeZsFI5o4J+nz6xUlaFdF/4= -go.opentelemetry.io/otel/metric v1.42.0/go.mod h1:RlUN/7vTU7Ao/diDkEpQpnz3/92J9ko05BIwxYa2SSI= -go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= -go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= -go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= -go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= 
-go.opentelemetry.io/otel/trace v1.42.0 h1:OUCgIPt+mzOnaUTpOQcBiM/PLQ/Op7oq6g4LenLmOYY= -go.opentelemetry.io/otel/trace v1.42.0/go.mod h1:f3K9S+IFqnumBkKhRJMeaZeNk9epyhnCmQh/EysQCdc= +go.opentelemetry.io/otel/metric v1.43.0 h1:d7638QeInOnuwOONPp4JAOGfbCEpYb+K6DVWvdxGzgM= +go.opentelemetry.io/otel/metric v1.43.0/go.mod h1:RDnPtIxvqlgO8GRW18W6Z/4P462ldprJtfxHxyKd2PY= +go.opentelemetry.io/otel/sdk v1.43.0 h1:pi5mE86i5rTeLXqoF/hhiBtUNcrAGHLKQdhg4h4V9Dg= +go.opentelemetry.io/otel/sdk v1.43.0/go.mod h1:P+IkVU3iWukmiit/Yf9AWvpyRDlUeBaRg6Y+C58QHzg= +go.opentelemetry.io/otel/sdk/metric v1.43.0 h1:S88dyqXjJkuBNLeMcVPRFXpRw2fuwdvfCGLEo89fDkw= +go.opentelemetry.io/otel/sdk/metric v1.43.0/go.mod h1:C/RJtwSEJ5hzTiUz5pXF1kILHStzb9zFlIEe85bhj6A= +go.opentelemetry.io/otel/trace v1.43.0 h1:BkNrHpup+4k4w+ZZ86CZoHHEkohws8AY+WTX09nk+3A= +go.opentelemetry.io/otel/trace v1.43.0/go.mod h1:/QJhyVBUUswCphDVxq+8mld+AvhXZLhe+8WVFxiFff0= go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A= go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -275,8 +275,8 @@ gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57 h1:JLQynH/LBHfCTSbDWl+py8C+Rg/k1OVH3xfcaiANuF0= google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57/go.mod h1:kSJwQxqmFXeo79zOmbrALdflXQeAYcUbgS7PbpMknCY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20260217215200-42d3e9bedb6d h1:t/LOSXPJ9R0B6fnZNyALBRfZBH0Uy0gT+uR+SJ6syqQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20260217215200-42d3e9bedb6d/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260319201613-d00831a3d3e7 h1:ndE4FoJqsIceKP2oYSnUZqhTdYufCYYkqwtFzfrhI7w= 
+google.golang.org/genproto/googleapis/rpc v0.0.0-20260319201613-d00831a3d3e7/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8= google.golang.org/grpc v1.79.3 h1:sybAEdRIEtvcD68Gx7dmnwjZKlyfuc61Dyo9pGXXkKE= google.golang.org/grpc v1.79.3/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ= google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= diff --git a/openshift/tests-extension/vendor/github.com/fxamacker/cbor/v2/.golangci.yml b/openshift/tests-extension/vendor/github.com/fxamacker/cbor/v2/.golangci.yml index 38cb9ae10..08081fbde 100644 --- a/openshift/tests-extension/vendor/github.com/fxamacker/cbor/v2/.golangci.yml +++ b/openshift/tests-extension/vendor/github.com/fxamacker/cbor/v2/.golangci.yml @@ -1,104 +1,116 @@ -# Do not delete linter settings. Linters like gocritic can be enabled on the command line. - -linters-settings: - depguard: - rules: - prevent_unmaintained_packages: - list-mode: strict - files: - - $all - - "!$test" - allow: - - $gostd - - github.com/x448/float16 - deny: - - pkg: io/ioutil - desc: "replaced by io and os packages since Go 1.16: https://tip.golang.org/doc/go1.16#ioutil" - dupl: - threshold: 100 - funlen: - lines: 100 - statements: 50 - goconst: - ignore-tests: true - min-len: 2 - min-occurrences: 3 - gocritic: - enabled-tags: - - diagnostic - - experimental - - opinionated - - performance - - style - disabled-checks: - - commentedOutCode - - dupImport # https://github.com/go-critic/go-critic/issues/845 - - ifElseChain - - octalLiteral - - paramTypeCombine - - whyNoLint - gofmt: - simplify: false - goimports: - local-prefixes: github.com/fxamacker/cbor - golint: - min-confidence: 0 - govet: - check-shadowing: true - lll: - line-length: 140 - maligned: - suggest-new: true - misspell: - locale: US - staticcheck: - checks: ["all"] - +version: "2" linters: - disable-all: true + default: none enable: - asciicheck - bidichk - depguard - errcheck - - exportloopref + - forbidigo - goconst - gocritic - 
gocyclo - - gofmt - - goimports - goprintffuncname - gosec - - gosimple - govet - ineffassign - misspell - nilerr - revive - staticcheck - - stylecheck - - typecheck - unconvert - unused - + settings: + depguard: + rules: + prevent_unmaintained_packages: + list-mode: strict + files: + - $all + - '!$test' + allow: + - $gostd + - github.com/x448/float16 + deny: + - pkg: io/ioutil + desc: 'replaced by io and os packages since Go 1.16: https://tip.golang.org/doc/go1.16#ioutil' + dupl: + threshold: 100 + funlen: + lines: 100 + statements: 50 + goconst: + min-len: 2 + min-occurrences: 3 + gocritic: + disabled-checks: + - commentedOutCode + - dupImport + - ifElseChain + - octalLiteral + - paramTypeCombine + - whyNoLint + enabled-tags: + - diagnostic + - experimental + - opinionated + - performance + - style + govet: + enable: + - shadow + lll: + line-length: 140 + misspell: + locale: US + staticcheck: + checks: + - all + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + rules: + - path: decode.go + text: string ` overflows ` has (\d+) occurrences, make it a constant + - path: decode.go + text: string ` \(range is \[` has (\d+) occurrences, make it a constant + - path: decode.go + text: string `, ` has (\d+) occurrences, make it a constant + - path: decode.go + text: string ` overflows Go's int64` has (\d+) occurrences, make it a constant + - path: decode.go + text: string `\]\)` has (\d+) occurrences, make it a constant + - path: valid.go + text: string ` for type ` has (\d+) occurrences, make it a constant + - path: valid.go + text: 'string `cbor: ` has (\d+) occurrences, make it a constant' + - linters: + - goconst + path: (.+)_test\.go + paths: + - third_party$ + - builtin$ + - examples$ issues: - # max-issues-per-linter default is 50. Set to 0 to disable limit. max-issues-per-linter: 0 - # max-same-issues default is 3. Set to 0 to disable limit. 
max-same-issues: 0 - - exclude-rules: - - path: decode.go - text: "string ` overflows ` has (\\d+) occurrences, make it a constant" - - path: decode.go - text: "string ` \\(range is \\[` has (\\d+) occurrences, make it a constant" - - path: decode.go - text: "string `, ` has (\\d+) occurrences, make it a constant" - - path: decode.go - text: "string ` overflows Go's int64` has (\\d+) occurrences, make it a constant" - - path: decode.go - text: "string `\\]\\)` has (\\d+) occurrences, make it a constant" - - path: valid.go - text: "string ` for type ` has (\\d+) occurrences, make it a constant" - - path: valid.go - text: "string `cbor: ` has (\\d+) occurrences, make it a constant" +formatters: + enable: + - gofmt + - goimports + settings: + gofmt: + simplify: false + goimports: + local-prefixes: + - github.com/fxamacker/cbor + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/openshift/tests-extension/vendor/github.com/fxamacker/cbor/v2/README.md b/openshift/tests-extension/vendor/github.com/fxamacker/cbor/v2/README.md index d072b81c7..f9ae78ec9 100644 --- a/openshift/tests-extension/vendor/github.com/fxamacker/cbor/v2/README.md +++ b/openshift/tests-extension/vendor/github.com/fxamacker/cbor/v2/README.md @@ -702,21 +702,20 @@ Default limits may need to be increased for systems handling very large data (e. ## Status -[v2.9.0](https://github.com/fxamacker/cbor/releases/tag/v2.9.0) (Jul 13, 2025) improved interoperability/transcoding between CBOR & JSON, refactored tests, and improved docs. -- Add opt-in support for `encoding.TextMarshaler` and `encoding.TextUnmarshaler` to encode and decode from CBOR text string. -- Add opt-in support for `json.Marshaler` and `json.Unmarshaler` via user-provided transcoding function. -- Update docs for TimeMode, Tag, RawTag, and add example for Embedded JSON Tag for CBOR. +v2.9.1 (Mar 29-30, 2026) includes important bugfixes, defensive checks, improved code quality, and more tests. 
Although not public, the fuzzer was also improved by adding more fuzz tests. -v2.9.0 passed fuzz tests and is production quality. +v2.9.1 passed fuzz tests and is production quality. The minimum version of Go required to build: - v2.8.0 and newer releases require go 1.20+. - v2.7.1 and older releases require go 1.17+. -For more details, see [release notes](https://github.com/fxamacker/cbor/releases). +For more details, see [v2.9.1 release notes](https://github.com/fxamacker/cbor/releases). ### Prior Releases +[v2.9.0](https://github.com/fxamacker/cbor/releases/tag/v2.9.0) (Jul 13, 2025) improved interoperability/transcoding between CBOR & JSON, refactored tests, and improved docs. It passed fuzz tests (billions of executions) and is production quality. + [v2.8.0](https://github.com/fxamacker/cbor/releases/tag/v2.8.0) (March 30, 2025) is a small release primarily to add `omitzero` option to struct field tags and fix bugs. It passed fuzz tests (billions of executions) and is production quality. [v2.7.0](https://github.com/fxamacker/cbor/releases/tag/v2.7.0) (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality. 
diff --git a/openshift/tests-extension/vendor/github.com/fxamacker/cbor/v2/cache.go b/openshift/tests-extension/vendor/github.com/fxamacker/cbor/v2/cache.go index 5051f110f..5743f3eb2 100644 --- a/openshift/tests-extension/vendor/github.com/fxamacker/cbor/v2/cache.go +++ b/openshift/tests-extension/vendor/github.com/fxamacker/cbor/v2/cache.go @@ -92,94 +92,126 @@ func newTypeInfo(t reflect.Type) *typeInfo { } type decodingStructType struct { - fields fields - fieldIndicesByName map[string]int - err error - toArray bool + fields decodingFields + fieldIndicesByName map[string]int // Only populated if toArray is false + fieldIndicesByIntKey map[int64]int // Only populated if toArray is false + err error + toArray bool } -// The stdlib errors.Join was introduced in Go 1.20, and we still support Go 1.17, so instead, -// here's a very basic implementation of an aggregated error. -type multierror []error - -func (m multierror) Error() string { - var sb strings.Builder - for i, err := range m { - sb.WriteString(err.Error()) - if i < len(m)-1 { - sb.WriteString(", ") - } - } - return sb.String() -} - -func getDecodingStructType(t reflect.Type) *decodingStructType { +func getDecodingStructType(t reflect.Type) (*decodingStructType, error) { if v, _ := decodingStructTypeCache.Load(t); v != nil { - return v.(*decodingStructType) + structType := v.(*decodingStructType) + if structType.err != nil { + return nil, structType.err + } + return structType, nil } flds, structOptions := getFields(t) toArray := hasToArrayOption(structOptions) - var errs []error - for i := 0; i < len(flds); i++ { - if flds[i].keyAsInt { - nameAsInt, numErr := strconv.Atoi(flds[i].name) - if numErr != nil { - errs = append(errs, errors.New("cbor: failed to parse field name \""+flds[i].name+"\" to int ("+numErr.Error()+")")) - break + if toArray { + return getDecodingStructToArrayType(t, flds) + } + + fieldIndicesByName := make(map[string]int, len(flds)) + var fieldIndicesByIntKey map[int64]int + + decFlds 
:= make(decodingFields, len(flds)) + for i, f := range flds { + // nameAsInt is set in getFields() except for fields with an unparsable tagged name. + // Atoi() is called here to catch and save parsing errors. + if f.keyAsInt && f.nameAsInt == 0 { + if _, numErr := strconv.Atoi(f.name); numErr != nil { + structType := &decodingStructType{ + err: errors.New("cbor: failed to parse field name \"" + f.name + "\" to int (" + numErr.Error() + ")"), + } + decodingStructTypeCache.Store(t, structType) + return nil, structType.err } - flds[i].nameAsInt = int64(nameAsInt) } - flds[i].typInfo = getTypeInfo(flds[i].typ) - } + if f.keyAsInt { + if fieldIndicesByIntKey == nil { + fieldIndicesByIntKey = make(map[int64]int, len(flds)) + } + // The duplication check is only a safeguard, since getFields() already deduplicates fields. + if _, ok := fieldIndicesByIntKey[f.nameAsInt]; ok { + structType := &decodingStructType{ + err: fmt.Errorf("cbor: two or more fields of %v have the same keyasint value %d", t, f.nameAsInt), + } + decodingStructTypeCache.Store(t, structType) + return nil, structType.err + } + fieldIndicesByIntKey[f.nameAsInt] = i + } else { + // The duplication check is only a safeguard, since getFields() already deduplicates fields. 
+ if _, ok := fieldIndicesByName[f.name]; ok { + structType := &decodingStructType{ + err: fmt.Errorf("cbor: two or more fields of %v have the same name %q", t, f.name), + } + decodingStructTypeCache.Store(t, structType) + return nil, structType.err + } + fieldIndicesByName[f.name] = i + } - fieldIndicesByName := make(map[string]int, len(flds)) - for i, fld := range flds { - if _, ok := fieldIndicesByName[fld.name]; ok { - errs = append(errs, fmt.Errorf("cbor: two or more fields of %v have the same name %q", t, fld.name)) - continue + decFlds[i] = &decodingField{ + field: *f, + typInfo: getTypeInfo(f.typ), } - fieldIndicesByName[fld.name] = i } - var err error - { - var multi multierror - for _, each := range errs { - if each != nil { - multi = append(multi, each) + structType := &decodingStructType{ + fields: decFlds, + fieldIndicesByName: fieldIndicesByName, + fieldIndicesByIntKey: fieldIndicesByIntKey, + } + decodingStructTypeCache.Store(t, structType) + return structType, nil +} + +func getDecodingStructToArrayType(t reflect.Type, flds fields) (*decodingStructType, error) { + decFlds := make(decodingFields, len(flds)) + for i, f := range flds { + // nameAsInt is set in getFields() except for fields with an unparsable tagged name. + // Atoi() is called here to catch and save parsing errors. 
+ if f.keyAsInt && f.nameAsInt == 0 { + if _, numErr := strconv.Atoi(f.name); numErr != nil { + structType := &decodingStructType{ + err: errors.New("cbor: failed to parse field name \"" + f.name + "\" to int (" + numErr.Error() + ")"), + } + decodingStructTypeCache.Store(t, structType) + return nil, structType.err } } - if len(multi) == 1 { - err = multi[0] - } else if len(multi) > 1 { - err = multi + + decFlds[i] = &decodingField{ + field: *f, + typInfo: getTypeInfo(f.typ), } } structType := &decodingStructType{ - fields: flds, - fieldIndicesByName: fieldIndicesByName, - err: err, - toArray: toArray, + fields: decFlds, + toArray: true, } decodingStructTypeCache.Store(t, structType) - return structType + return structType, nil } type encodingStructType struct { - fields fields - bytewiseFields fields - lengthFirstFields fields - omitEmptyFieldsIdx []int + fields encodingFields + bytewiseFields encodingFields // Only populated if toArray is false + lengthFirstFields encodingFields // Only populated if toArray is false + omitEmptyFieldsIdx []int // Only populated if toArray is false err error toArray bool } -func (st *encodingStructType) getFields(em *encMode) fields { +func (st *encodingStructType) getFields(em *encMode) encodingFields { switch em.sort { case SortNone, SortFastShuffle: return st.fields @@ -191,7 +223,7 @@ func (st *encodingStructType) getFields(em *encMode) fields { } type bytewiseFieldSorter struct { - fields fields + fields encodingFields } func (x *bytewiseFieldSorter) Len() int { @@ -203,11 +235,11 @@ func (x *bytewiseFieldSorter) Swap(i, j int) { } func (x *bytewiseFieldSorter) Less(i, j int) bool { - return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) <= 0 + return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) < 0 } type lengthFirstFieldSorter struct { - fields fields + fields encodingFields } func (x *lengthFirstFieldSorter) Len() int { @@ -222,13 +254,16 @@ func (x *lengthFirstFieldSorter) Less(i, j int) bool { if 
len(x.fields[i].cborName) != len(x.fields[j].cborName) { return len(x.fields[i].cborName) < len(x.fields[j].cborName) } - return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) <= 0 + return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) < 0 } func getEncodingStructType(t reflect.Type) (*encodingStructType, error) { if v, _ := encodingStructTypeCache.Load(t); v != nil { structType := v.(*encodingStructType) - return structType, structType.err + if structType.err != nil { + return nil, structType.err + } + return structType, nil } flds, structOptions := getFields(t) @@ -237,111 +272,119 @@ func getEncodingStructType(t reflect.Type) (*encodingStructType, error) { return getEncodingStructToArrayType(t, flds) } - var err error var hasKeyAsInt bool var hasKeyAsStr bool var omitEmptyIdx []int + + encFlds := make(encodingFields, len(flds)) + e := getEncodeBuffer() - for i := 0; i < len(flds); i++ { + defer putEncodeBuffer(e) + + for i, f := range flds { + encFlds[i] = &encodingField{field: *f} + ef := encFlds[i] + // Get field's encodeFunc - flds[i].ef, flds[i].ief, flds[i].izf = getEncodeFunc(flds[i].typ) - if flds[i].ef == nil { - err = &UnsupportedTypeError{t} - break + ef.ef, ef.ief, ef.izf = getEncodeFunc(f.typ) + if ef.ef == nil { + structType := &encodingStructType{err: &UnsupportedTypeError{t}} + encodingStructTypeCache.Store(t, structType) + return nil, structType.err } // Encode field name - if flds[i].keyAsInt { - nameAsInt, numErr := strconv.Atoi(flds[i].name) - if numErr != nil { - err = errors.New("cbor: failed to parse field name \"" + flds[i].name + "\" to int (" + numErr.Error() + ")") - break + if f.keyAsInt { + if f.nameAsInt == 0 { + // nameAsInt is set in getFields() except for fields with an unparsable tagged name. + // Atoi() is called here to catch and save parsing errors. 
+ if _, numErr := strconv.Atoi(f.name); numErr != nil { + structType := &encodingStructType{ + err: errors.New("cbor: failed to parse field name \"" + f.name + "\" to int (" + numErr.Error() + ")"), + } + encodingStructTypeCache.Store(t, structType) + return nil, structType.err + } } - flds[i].nameAsInt = int64(nameAsInt) + nameAsInt := f.nameAsInt if nameAsInt >= 0 { - encodeHead(e, byte(cborTypePositiveInt), uint64(nameAsInt)) + encodeHead(e, byte(cborTypePositiveInt), uint64(nameAsInt)) //nolint:gosec } else { n := nameAsInt*(-1) - 1 - encodeHead(e, byte(cborTypeNegativeInt), uint64(n)) + encodeHead(e, byte(cborTypeNegativeInt), uint64(n)) //nolint:gosec } - flds[i].cborName = make([]byte, e.Len()) - copy(flds[i].cborName, e.Bytes()) + ef.cborName = make([]byte, e.Len()) + copy(ef.cborName, e.Bytes()) e.Reset() hasKeyAsInt = true } else { - encodeHead(e, byte(cborTypeTextString), uint64(len(flds[i].name))) - flds[i].cborName = make([]byte, e.Len()+len(flds[i].name)) - n := copy(flds[i].cborName, e.Bytes()) - copy(flds[i].cborName[n:], flds[i].name) + encodeHead(e, byte(cborTypeTextString), uint64(len(f.name))) + ef.cborName = make([]byte, e.Len()+len(f.name)) + n := copy(ef.cborName, e.Bytes()) + copy(ef.cborName[n:], f.name) e.Reset() // If cborName contains a text string, then cborNameByteString contains a // string that has the byte string major type but is otherwise identical to // cborName. 
- flds[i].cborNameByteString = make([]byte, len(flds[i].cborName)) - copy(flds[i].cborNameByteString, flds[i].cborName) + ef.cborNameByteString = make([]byte, len(ef.cborName)) + copy(ef.cborNameByteString, ef.cborName) // Reset encoded CBOR type to byte string, preserving the "additional // information" bits: - flds[i].cborNameByteString[0] = byte(cborTypeByteString) | - getAdditionalInformation(flds[i].cborNameByteString[0]) + ef.cborNameByteString[0] = byte(cborTypeByteString) | + getAdditionalInformation(ef.cborNameByteString[0]) hasKeyAsStr = true } // Check if field can be omitted when empty - if flds[i].omitEmpty { + if f.omitEmpty { omitEmptyIdx = append(omitEmptyIdx, i) } } - putEncodeBuffer(e) - - if err != nil { - structType := &encodingStructType{err: err} - encodingStructTypeCache.Store(t, structType) - return structType, structType.err - } // Sort fields by canonical order - bytewiseFields := make(fields, len(flds)) - copy(bytewiseFields, flds) + bytewiseFields := make(encodingFields, len(encFlds)) + copy(bytewiseFields, encFlds) sort.Sort(&bytewiseFieldSorter{bytewiseFields}) lengthFirstFields := bytewiseFields if hasKeyAsInt && hasKeyAsStr { - lengthFirstFields = make(fields, len(flds)) - copy(lengthFirstFields, flds) + lengthFirstFields = make(encodingFields, len(encFlds)) + copy(lengthFirstFields, encFlds) sort.Sort(&lengthFirstFieldSorter{lengthFirstFields}) } structType := &encodingStructType{ - fields: flds, + fields: encFlds, bytewiseFields: bytewiseFields, lengthFirstFields: lengthFirstFields, omitEmptyFieldsIdx: omitEmptyIdx, } encodingStructTypeCache.Store(t, structType) - return structType, structType.err + return structType, nil } func getEncodingStructToArrayType(t reflect.Type, flds fields) (*encodingStructType, error) { - for i := 0; i < len(flds); i++ { - // Get field's encodeFunc - flds[i].ef, flds[i].ief, flds[i].izf = getEncodeFunc(flds[i].typ) - if flds[i].ef == nil { + encFlds := make(encodingFields, len(flds)) + for i, f := 
range flds { + encFlds[i] = &encodingField{field: *f} + encFlds[i].ef, encFlds[i].ief, encFlds[i].izf = getEncodeFunc(f.typ) + if encFlds[i].ef == nil { structType := &encodingStructType{err: &UnsupportedTypeError{t}} encodingStructTypeCache.Store(t, structType) - return structType, structType.err + return nil, structType.err } } structType := &encodingStructType{ - fields: flds, + fields: encFlds, toArray: true, } encodingStructTypeCache.Store(t, structType) - return structType, structType.err + return structType, nil } func getEncodeFunc(t reflect.Type) (encodeFunc, isEmptyFunc, isZeroFunc) { diff --git a/openshift/tests-extension/vendor/github.com/fxamacker/cbor/v2/decode.go b/openshift/tests-extension/vendor/github.com/fxamacker/cbor/v2/decode.go index f0bdc3b38..03fd7f8b0 100644 --- a/openshift/tests-extension/vendor/github.com/fxamacker/cbor/v2/decode.go +++ b/openshift/tests-extension/vendor/github.com/fxamacker/cbor/v2/decode.go @@ -16,7 +16,6 @@ import ( "math/big" "reflect" "strconv" - "strings" "time" "unicode/utf8" @@ -326,14 +325,14 @@ func (dmkm DupMapKeyMode) valid() bool { return dmkm >= 0 && dmkm < maxDupMapKeyMode } -// IndefLengthMode specifies whether to allow indefinite length items. +// IndefLengthMode specifies whether to allow indefinite-length items. type IndefLengthMode int const ( - // IndefLengthAllowed allows indefinite length items. + // IndefLengthAllowed allows indefinite-length items. IndefLengthAllowed IndefLengthMode = iota - // IndefLengthForbidden disallows indefinite length items. + // IndefLengthForbidden disallows indefinite-length items. IndefLengthForbidden maxIndefLengthMode @@ -378,6 +377,7 @@ const ( // - int64 if value fits // - big.Int or *big.Int (see BigIntDecMode) if value < math.MinInt64 // - return UnmarshalTypeError if value > math.MaxInt64 + // // Deprecated: IntDecConvertSigned should not be used. // Please use other options, such as IntDecConvertSignedOrError, IntDecConvertSignedOrBigInt, IntDecConvertNone. 
IntDecConvertSigned @@ -811,7 +811,7 @@ type DecOptions struct { // Default is 128*1024=131072 and it can be set to [16, 2147483647] MaxMapPairs int - // IndefLength specifies whether to allow indefinite length CBOR items. + // IndefLength specifies whether to allow indefinite-length CBOR items. IndefLength IndefLengthMode // TagsMd specifies whether to allow CBOR tags (major type 6). @@ -1055,7 +1055,7 @@ func (opts DecOptions) decMode() (*decMode, error) { //nolint:gocritic // ignore } if !opts.ExtraReturnErrors.valid() { - return nil, errors.New("cbor: invalid ExtraReturnErrors " + strconv.Itoa(int(opts.ExtraReturnErrors))) + return nil, errors.New("cbor: invalid ExtraReturnErrors " + strconv.Itoa(int(opts.ExtraReturnErrors))) //nolint:gosec } if opts.DefaultMapType != nil && opts.DefaultMapType.Kind() != reflect.Map { @@ -1149,8 +1149,8 @@ func (opts DecOptions) decMode() (*decMode, error) { //nolint:gocritic // ignore unrecognizedTagToAny: opts.UnrecognizedTagToAny, timeTagToAny: opts.TimeTagToAny, simpleValues: simpleValues, - nanDec: opts.NaN, - infDec: opts.Inf, + nan: opts.NaN, + inf: opts.Inf, byteStringToTime: opts.ByteStringToTime, byteStringExpectedFormat: opts.ByteStringExpectedFormat, bignumTag: opts.BignumTag, @@ -1230,8 +1230,8 @@ type decMode struct { unrecognizedTagToAny UnrecognizedTagToAnyMode timeTagToAny TimeTagToAnyMode simpleValues *SimpleValueRegistry - nanDec NaNMode - infDec InfMode + nan NaNMode + inf InfMode byteStringToTime ByteStringToTimeMode byteStringExpectedFormat ByteStringExpectedFormatMode bignumTag BignumTagMode @@ -1272,8 +1272,8 @@ func (dm *decMode) DecOptions() DecOptions { UnrecognizedTagToAny: dm.unrecognizedTagToAny, TimeTagToAny: dm.timeTagToAny, SimpleValues: simpleValues, - NaN: dm.nanDec, - Inf: dm.infDec, + NaN: dm.nan, + Inf: dm.inf, ByteStringToTime: dm.byteStringToTime, ByteStringExpectedFormat: dm.byteStringExpectedFormat, BignumTag: dm.bignumTag, @@ -1583,11 +1583,11 @@ func (d *decoder) parseToValue(v 
reflect.Value, tInfo *typeInfo) error { //nolin _, ai, val := d.getHead() switch ai { case additionalInformationAsFloat16: - f := float64(float16.Frombits(uint16(val)).Float32()) + f := float64(float16.Frombits(uint16(val)).Float32()) //nolint:gosec return fillFloat(t, f, v) case additionalInformationAsFloat32: - f := float64(math.Float32frombits(uint32(val))) + f := float64(math.Float32frombits(uint32(val))) //nolint:gosec return fillFloat(t, f, v) case additionalInformationAsFloat64: @@ -1595,10 +1595,10 @@ func (d *decoder) parseToValue(v reflect.Value, tInfo *typeInfo) error { //nolin return fillFloat(t, f, v) default: // ai <= 24 - if d.dm.simpleValues.rejected[SimpleValue(val)] { + if d.dm.simpleValues.rejected[SimpleValue(val)] { //nolint:gosec return &UnacceptableDataItemError{ CBORType: t.String(), - Message: "simple value " + strconv.FormatInt(int64(val), 10) + " is not recognized", + Message: "simple value " + strconv.FormatInt(int64(val), 10) + " is not recognized", //nolint:gosec } } @@ -1677,20 +1677,23 @@ func (d *decoder) parseToValue(v reflect.Value, tInfo *typeInfo) error { //nolin return d.parseToValue(v, tInfo) case cborTypeArray: - if tInfo.nonPtrKind == reflect.Slice { + switch tInfo.nonPtrKind { + case reflect.Slice: return d.parseArrayToSlice(v, tInfo) - } else if tInfo.nonPtrKind == reflect.Array { + case reflect.Array: return d.parseArrayToArray(v, tInfo) - } else if tInfo.nonPtrKind == reflect.Struct { + case reflect.Struct: return d.parseArrayToStruct(v, tInfo) } + d.skip() return &UnmarshalTypeError{CBORType: t.String(), GoType: tInfo.nonPtrType.String()} case cborTypeMap: - if tInfo.nonPtrKind == reflect.Struct { + switch tInfo.nonPtrKind { + case reflect.Struct: return d.parseMapToStruct(v, tInfo) - } else if tInfo.nonPtrKind == reflect.Map { + case reflect.Map: return d.parseMapToMap(v, tInfo) } d.skip() @@ -1745,8 +1748,8 @@ func (d *decoder) parseToTime() (time.Time, bool, error) { // Read tag number _, _, tagNum := d.getHead() if 
tagNum != 0 && tagNum != 1 { - d.skip() // skip tag content - return time.Time{}, false, errors.New("cbor: wrong tag number for time.Time, got " + strconv.Itoa(int(tagNum)) + ", expect 0 or 1") + d.skip() // skip tag content + return time.Time{}, false, errors.New("cbor: wrong tag number for time.Time, got " + strconv.Itoa(int(tagNum)) + ", expect 0 or 1") //nolint:gosec } } } else { @@ -1815,10 +1818,10 @@ func (d *decoder) parseToTime() (time.Time, bool, error) { var f float64 switch ai { case additionalInformationAsFloat16: - f = float64(float16.Frombits(uint16(val)).Float32()) + f = float64(float16.Frombits(uint16(val)).Float32()) //nolint:gosec case additionalInformationAsFloat32: - f = float64(math.Float32frombits(uint32(val))) + f = float64(math.Float32frombits(uint32(val))) //nolint:gosec case additionalInformationAsFloat64: f = math.Float64frombits(val) @@ -1832,6 +1835,13 @@ func (d *decoder) parseToTime() (time.Time, bool, error) { return time.Time{}, true, nil } seconds, fractional := math.Modf(f) + if seconds > math.MaxInt64 || seconds < math.MinInt64 { + return time.Time{}, false, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: typeTime.String(), + errorMsg: fmt.Sprintf("%v overflows Go's int64", f), + } + } return time.Unix(int64(seconds), int64(fractional*1e9)), true, nil default: @@ -2145,14 +2155,14 @@ func (d *decoder) parse(skipSelfDescribedTag bool) (any, error) { //nolint:gocyc case cborTypePrimitives: _, ai, val := d.getHead() - if ai <= 24 && d.dm.simpleValues.rejected[SimpleValue(val)] { + if ai <= 24 && d.dm.simpleValues.rejected[SimpleValue(val)] { //nolint:gosec return nil, &UnacceptableDataItemError{ CBORType: t.String(), - Message: "simple value " + strconv.FormatInt(int64(val), 10) + " is not recognized", + Message: "simple value " + strconv.FormatInt(int64(val), 10) + " is not recognized", //nolint:gosec } } if ai < 20 || ai == 24 { - return SimpleValue(val), nil + return SimpleValue(val), nil //nolint:gosec } switch ai { @@ 
-2165,11 +2175,11 @@ func (d *decoder) parse(skipSelfDescribedTag bool) (any, error) { //nolint:gocyc return nil, nil case additionalInformationAsFloat16: - f := float64(float16.Frombits(uint16(val)).Float32()) + f := float64(float16.Frombits(uint16(val)).Float32()) //nolint:gosec return f, nil case additionalInformationAsFloat32: - f := float64(math.Float32frombits(uint32(val))) + f := float64(math.Float32frombits(uint32(val))) //nolint:gosec return f, nil case additionalInformationAsFloat64: @@ -2202,16 +2212,16 @@ func (d *decoder) parse(skipSelfDescribedTag bool) (any, error) { //nolint:gocyc func (d *decoder) parseByteString() ([]byte, bool) { _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() if !indefiniteLength { - b := d.data[d.off : d.off+int(val)] - d.off += int(val) + b := d.data[d.off : d.off+int(val)] //nolint:gosec + d.off += int(val) //nolint:gosec return b, false } - // Process indefinite length string chunks. + // Process indefinite-length string chunks. b := []byte{} for !d.foundBreak() { _, _, val = d.getHead() - b = append(b, d.data[d.off:d.off+int(val)]...) - d.off += int(val) + b = append(b, d.data[d.off:d.off+int(val)]...) //nolint:gosec + d.off += int(val) //nolint:gosec } return b, true } @@ -2300,19 +2310,19 @@ func (d *decoder) applyByteStringTextConversion( func (d *decoder) parseTextString() ([]byte, error) { _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() if !indefiniteLength { - b := d.data[d.off : d.off+int(val)] - d.off += int(val) + b := d.data[d.off : d.off+int(val)] //nolint:gosec + d.off += int(val) //nolint:gosec if d.dm.utf8 == UTF8RejectInvalid && !utf8.Valid(b) { return nil, &SemanticError{"cbor: invalid UTF-8 string"} } return b, nil } - // Process indefinite length string chunks. + // Process indefinite-length string chunks. 
b := []byte{} for !d.foundBreak() { _, _, val = d.getHead() - x := d.data[d.off : d.off+int(val)] - d.off += int(val) + x := d.data[d.off : d.off+int(val)] //nolint:gosec + d.off += int(val) //nolint:gosec if d.dm.utf8 == UTF8RejectInvalid && !utf8.Valid(x) { for !d.foundBreak() { d.skip() // Skip remaining chunk on error @@ -2327,7 +2337,7 @@ func (d *decoder) parseTextString() ([]byte, error) { func (d *decoder) parseArray() ([]any, error) { _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() hasSize := !indefiniteLength - count := int(val) + count := int(val) //nolint:gosec if !hasSize { count = d.numOfItemsUntilBreak() // peek ahead to get array size to preallocate slice for better performance } @@ -2349,7 +2359,7 @@ func (d *decoder) parseArray() ([]any, error) { func (d *decoder) parseArrayToSlice(v reflect.Value, tInfo *typeInfo) error { _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() hasSize := !indefiniteLength - count := int(val) + count := int(val) //nolint:gosec if !hasSize { count = d.numOfItemsUntilBreak() // peek ahead to get array size to preallocate slice for better performance } @@ -2371,7 +2381,7 @@ func (d *decoder) parseArrayToSlice(v reflect.Value, tInfo *typeInfo) error { func (d *decoder) parseArrayToArray(v reflect.Value, tInfo *typeInfo) error { _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() hasSize := !indefiniteLength - count := int(val) + count := int(val) //nolint:gosec gi := 0 vLen := v.Len() var err error @@ -2400,7 +2410,7 @@ func (d *decoder) parseArrayToArray(v reflect.Value, tInfo *typeInfo) error { func (d *decoder) parseMap() (any, error) { _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() hasSize := !indefiniteLength - count := int(val) + count := int(val) //nolint:gosec m := make(map[any]any) var k, e any var err, lastErr error @@ -2465,7 +2475,7 @@ func (d *decoder) parseMap() (any, error) { func (d *decoder) parseMapToMap(v reflect.Value, tInfo 
*typeInfo) error { //nolint:gocyclo _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() hasSize := !indefiniteLength - count := int(val) + count := int(val) //nolint:gosec if v.IsNil() { mapsize := count if !hasSize { @@ -2566,9 +2576,9 @@ func (d *decoder) parseMapToMap(v reflect.Value, tInfo *typeInfo) error { //noli } func (d *decoder) parseArrayToStruct(v reflect.Value, tInfo *typeInfo) error { - structType := getDecodingStructType(tInfo.nonPtrType) - if structType.err != nil { - return structType.err + structType, structTypeErr := getDecodingStructType(tInfo.nonPtrType) + if structTypeErr != nil { + return structTypeErr } if !structType.toArray { @@ -2584,7 +2594,7 @@ func (d *decoder) parseArrayToStruct(v reflect.Value, tInfo *typeInfo) error { start := d.off _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() hasSize := !indefiniteLength - count := int(val) + count := int(val) //nolint:gosec if !hasSize { count = d.numOfItemsUntilBreak() // peek ahead to get array size } @@ -2637,11 +2647,72 @@ func (d *decoder) parseArrayToStruct(v reflect.Value, tInfo *typeInfo) error { return err } -// parseMapToStruct needs to be fast so gocyclo can be ignored for now. +// skipMapEntriesFromIndex skips remaining map entries starting from index i. +func (d *decoder) skipMapEntriesFromIndex(i, count int, hasSize bool) { + for ; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + d.skip() + d.skip() + } +} + +// skipMapForDupKey skips the current map value and all remaining map entries, +// then returns a DupMapKeyError for the given key at map index i. +func (d *decoder) skipMapForDupKey(dupKey any, i, count int, hasSize bool) error { + // Skip the value of the duplicate key. + d.skip() + // Skip all remaining map entries. 
+ d.skipMapEntriesFromIndex(i+1, count, hasSize) + return &DupMapKeyError{dupKey, i} +} + +// skipMapForUnknownField skips the current map value and all remaining map entries, +// then returns a UnknownFieldError for the given key at map index i. +func (d *decoder) skipMapForUnknownField(i, count int, hasSize bool) error { + // Skip the value of the unknown key. + d.skip() + // Skip all remaining map entries. + d.skipMapEntriesFromIndex(i+1, count, hasSize) + return &UnknownFieldError{i} +} + +// decodeToStructField decodes the next CBOR value into the struct field f in v. +// If the field cannot be resolved, the CBOR value is skipped. +func (d *decoder) decodeToStructField(v reflect.Value, f *decodingField, tInfo *typeInfo) error { + var fv reflect.Value + + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + var err error + fv, err = getFieldValue(v, f.idx, func(v reflect.Value) (reflect.Value, error) { + // Return a new value for embedded field null pointer to point to, or return error. + if !v.CanSet() { + return reflect.Value{}, errors.New("cbor: cannot set embedded pointer to unexported struct: " + v.Type().String()) + } + v.Set(reflect.New(v.Type().Elem())) + return v, nil + }) + if !fv.IsValid() { + d.skip() + return err + } + } + + err := d.parseToValue(fv, f.typInfo) + if err != nil { + if typeError, ok := err.(*UnmarshalTypeError); ok { + typeError.StructFieldName = tInfo.nonPtrType.String() + "." 
+ f.name + } + return err + } + + return nil +} + func (d *decoder) parseMapToStruct(v reflect.Value, tInfo *typeInfo) error { //nolint:gocyclo - structType := getDecodingStructType(tInfo.nonPtrType) - if structType.err != nil { - return structType.err + structType, structTypeErr := getDecodingStructType(tInfo.nonPtrType) + if structTypeErr != nil { + return structTypeErr } if structType.toArray { @@ -2654,14 +2725,12 @@ func (d *decoder) parseMapToStruct(v reflect.Value, tInfo *typeInfo) error { //n } } - var err, lastErr error - // Get CBOR map size _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() hasSize := !indefiniteLength - count := int(val) + count := int(val) //nolint:gosec - // Keeps track of matched struct fields + // Keep track of matched struct fields to detect duplicate map keys. var foundFldIdx []bool { const maxStackFields = 128 @@ -2675,99 +2744,80 @@ func (d *decoder) parseMapToStruct(v reflect.Value, tInfo *typeInfo) error { //n } } - // Keeps track of CBOR map keys to detect duplicate map key - keyCount := 0 - var mapKeys map[any]struct{} - - errOnUnknownField := (d.dm.extraReturnErrors & ExtraDecErrorUnknownField) > 0 + // Keep track of unmatched CBOR map keys to detect duplicate map keys. + var unmatchedMapKeys map[any]struct{} -MapEntryLoop: - for j := 0; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { - var f *field + var err error - // If duplicate field detection is enabled and the key at index j did not match any - // field, k will hold the map key. - var k any + caseInsensitive := d.dm.fieldNameMatching == FieldNameMatchingPreferCaseSensitive + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { t := d.nextCBORType() - if t == cborTypeTextString || (t == cborTypeByteString && d.dm.fieldNameByteString == FieldNameByteStringAllowed) { + + // Reclassify disallowed byte string keys so they fall to the default case. + // keyType is only used for branch control. 
+ keyType := t + if t == cborTypeByteString && d.dm.fieldNameByteString != FieldNameByteStringAllowed { + keyType = 0xff + } + + switch keyType { + case cborTypeTextString, cborTypeByteString: var keyBytes []byte if t == cborTypeTextString { - keyBytes, lastErr = d.parseTextString() - if lastErr != nil { + var parseErr error + keyBytes, parseErr = d.parseTextString() + if parseErr != nil { if err == nil { - err = lastErr + err = parseErr } - d.skip() // skip value + d.skip() // Skip value continue } } else { // cborTypeByteString keyBytes, _ = d.parseByteString() } - // Check for exact match on field name. - if i, ok := structType.fieldIndicesByName[string(keyBytes)]; ok { - fld := structType.fields[i] + // Find matching struct field (exact match, then case-insensitive fallback). + if fldIdx, ok := findStructFieldByKey(structType, keyBytes, caseInsensitive); ok { + fld := structType.fields[fldIdx] - if !foundFldIdx[i] { - f = fld - foundFldIdx[i] = true - } else if d.dm.dupMapKey == DupMapKeyEnforcedAPF { - err = &DupMapKeyError{fld.name, j} - d.skip() // skip value - j++ - // skip the rest of the map - for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { - d.skip() - d.skip() + switch checkDupField(d.dm, foundFldIdx, fldIdx) { + case mapActionParseValueAndContinue: + if fieldErr := d.decodeToStructField(v, fld, tInfo); fieldErr != nil && err == nil { + err = fieldErr } - return err - } else { - // discard repeated match + continue + case mapActionSkipAllAndReturnError: + return d.skipMapForDupKey(string(keyBytes), i, count, hasSize) + case mapActionSkipValueAndContinue: d.skip() - continue MapEntryLoop + continue } } - // Find field with case-insensitive match - if f == nil && d.dm.fieldNameMatching == FieldNameMatchingPreferCaseSensitive { - keyLen := len(keyBytes) - keyString := string(keyBytes) - for i := 0; i < len(structType.fields); i++ { - fld := structType.fields[i] - if len(fld.name) == keyLen && strings.EqualFold(fld.name, keyString) { - 
if !foundFldIdx[i] { - f = fld - foundFldIdx[i] = true - } else if d.dm.dupMapKey == DupMapKeyEnforcedAPF { - err = &DupMapKeyError{keyString, j} - d.skip() // skip value - j++ - // skip the rest of the map - for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { - d.skip() - d.skip() - } - return err - } else { - // discard repeated match - d.skip() - continue MapEntryLoop - } - break - } - } + // No matching struct field found. + if unmatchedErr := handleUnmatchedMapKey(d, string(keyBytes), i, count, hasSize, &unmatchedMapKeys); unmatchedErr != nil { + return unmatchedErr } - if d.dm.dupMapKey == DupMapKeyEnforcedAPF && f == nil { - k = string(keyBytes) - } - } else if t <= cborTypeNegativeInt { // uint/int + case cborTypePositiveInt, cborTypeNegativeInt: var nameAsInt int64 if t == cborTypePositiveInt { _, _, val := d.getHead() - nameAsInt = int64(val) + if val > math.MaxInt64 { + if err == nil { + err = &UnmarshalTypeError{ + CBORType: t.String(), + GoType: reflect.TypeOf(int64(0)).String(), + errorMsg: strconv.FormatUint(val, 10) + " overflows Go's int64", + } + } + d.skip() // skip value + continue + } + nameAsInt = int64(val) //nolint:gosec } else { _, _, val := d.getHead() if val > math.MaxInt64 { @@ -2781,39 +2831,35 @@ MapEntryLoop: d.skip() // skip value continue } - nameAsInt = int64(-1) ^ int64(val) - } - - // Find field - for i := 0; i < len(structType.fields); i++ { - fld := structType.fields[i] - if fld.keyAsInt && fld.nameAsInt == nameAsInt { - if !foundFldIdx[i] { - f = fld - foundFldIdx[i] = true - } else if d.dm.dupMapKey == DupMapKeyEnforcedAPF { - err = &DupMapKeyError{nameAsInt, j} - d.skip() // skip value - j++ - // skip the rest of the map - for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { - d.skip() - d.skip() - } - return err - } else { - // discard repeated match - d.skip() - continue MapEntryLoop + nameAsInt = int64(-1) ^ int64(val) //nolint:gosec + } + + // Find field by integer key + if fldIdx, ok := 
structType.fieldIndicesByIntKey[nameAsInt]; ok { + fld := structType.fields[fldIdx] + + switch checkDupField(d.dm, foundFldIdx, fldIdx) { + case mapActionParseValueAndContinue: + if fieldErr := d.decodeToStructField(v, fld, tInfo); fieldErr != nil && err == nil { + err = fieldErr } - break + continue + case mapActionSkipAllAndReturnError: + return d.skipMapForDupKey(nameAsInt, i, count, hasSize) + case mapActionSkipValueAndContinue: + d.skip() + continue } } - if d.dm.dupMapKey == DupMapKeyEnforcedAPF && f == nil { - k = nameAsInt + // No matching struct field found. + if unmatchedErr := handleUnmatchedMapKey(d, nameAsInt, i, count, hasSize, &unmatchedMapKeys); unmatchedErr != nil { + return unmatchedErr } - } else { + + default: + // CBOR map keys that can't be matched to any struct field. + if err == nil { err = &UnmarshalTypeError{ CBORType: t.String(), @@ -2821,97 +2867,31 @@ MapEntryLoop: errorMsg: "map key is of type " + t.String() + " and cannot be used to match struct field name", } } + + var otherKey any if d.dm.dupMapKey == DupMapKeyEnforcedAPF { // parse key - k, lastErr = d.parse(true) - if lastErr != nil { + var parseErr error + otherKey, parseErr = d.parse(true) + if parseErr != nil { d.skip() // skip value continue } // Detect if CBOR map key can be used as Go map key. - if !isHashableValue(reflect.ValueOf(k)) { + if !isHashableValue(reflect.ValueOf(otherKey)) { d.skip() // skip value continue } } else { d.skip() // skip key } - } - - if f == nil { - if errOnUnknownField { - err = &UnknownFieldError{j} - d.skip() // Skip value - j++ - // skip the rest of the map - for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { - d.skip() - d.skip() - } - return err - } - - // Two map keys that match the same struct field are immediately considered - // duplicates. This check detects duplicates between two map keys that do - // not match a struct field. If unknown field errors are enabled, then this - // check is never reached. 
- if d.dm.dupMapKey == DupMapKeyEnforcedAPF { - if mapKeys == nil { - mapKeys = make(map[any]struct{}, 1) - } - mapKeys[k] = struct{}{} - newKeyCount := len(mapKeys) - if newKeyCount == keyCount { - err = &DupMapKeyError{k, j} - d.skip() // skip value - j++ - // skip the rest of the map - for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { - d.skip() - d.skip() - } - return err - } - keyCount = newKeyCount - } - - d.skip() // Skip value - continue - } - - // Get field value by index - var fv reflect.Value - if len(f.idx) == 1 { - fv = v.Field(f.idx[0]) - } else { - fv, lastErr = getFieldValue(v, f.idx, func(v reflect.Value) (reflect.Value, error) { - // Return a new value for embedded field null pointer to point to, or return error. - if !v.CanSet() { - return reflect.Value{}, errors.New("cbor: cannot set embedded pointer to unexported struct: " + v.Type().String()) - } - v.Set(reflect.New(v.Type().Elem())) - return v, nil - }) - if lastErr != nil && err == nil { - err = lastErr - } - if !fv.IsValid() { - d.skip() - continue - } - } - if lastErr = d.parseToValue(fv, f.typInfo); lastErr != nil { - if err == nil { - if typeError, ok := lastErr.(*UnmarshalTypeError); ok { - typeError.StructFieldName = tInfo.nonPtrType.String() + "." 
+ f.name - err = typeError - } else { - err = lastErr - } + if unmatchedErr := handleUnmatchedMapKey(d, otherKey, i, count, hasSize, &unmatchedMapKeys); unmatchedErr != nil { + return unmatchedErr } } } + return err } @@ -2958,15 +2938,15 @@ func (d *decoder) skip() { switch t { case cborTypeByteString, cborTypeTextString: - d.off += int(val) + d.off += int(val) //nolint:gosec case cborTypeArray: - for i := 0; i < int(val); i++ { + for i := 0; i < int(val); i++ { //nolint:gosec d.skip() } case cborTypeMap: - for i := 0; i < int(val)*2; i++ { + for i := 0; i < int(val)*2; i++ { //nolint:gosec d.skip() } diff --git a/openshift/tests-extension/vendor/github.com/fxamacker/cbor/v2/decode_map_utils.go b/openshift/tests-extension/vendor/github.com/fxamacker/cbor/v2/decode_map_utils.go new file mode 100644 index 000000000..3c8c423ad --- /dev/null +++ b/openshift/tests-extension/vendor/github.com/fxamacker/cbor/v2/decode_map_utils.go @@ -0,0 +1,98 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import "strings" + +// mapAction represents the next action during decoding a CBOR map to a Go struct. +type mapAction int + +const ( + mapActionParseValueAndContinue mapAction = iota // The caller should process the map value. + mapActionSkipValueAndContinue // The caller should skip the map value. + mapActionSkipAllAndReturnError // The caller should skip the rest of the map and return an error. +) + +// checkDupField checks if a struct field at index i has already been matched and returns the next action. +// If not matched, it marks the field as matched and returns mapActionParseValueAndContinue. +// If matched and DupMapKeyEnforcedAPF is specified in the given dm, it returns mapActionSkipAllAndReturnError. +// If matched and DupMapKeyEnforcedAPF is not specified in the given dm, it returns mapActionSkipValueAndContinue. 
+func checkDupField(dm *decMode, foundFldIdx []bool, i int) mapAction { + if !foundFldIdx[i] { + foundFldIdx[i] = true + return mapActionParseValueAndContinue + } + if dm.dupMapKey == DupMapKeyEnforcedAPF { + return mapActionSkipAllAndReturnError + } + return mapActionSkipValueAndContinue +} + +// findStructFieldByKey finds a struct field matching keyBytes by name. +// It tries an exact match first. If no exact match is found and +// caseInsensitive is true, it falls back to a case-insensitive search. +// findStructFieldByKey returns the field index and true, or -1 and false. +func findStructFieldByKey( + structType *decodingStructType, + keyBytes []byte, + caseInsensitive bool, +) (int, bool) { + if fldIdx, ok := structType.fieldIndicesByName[string(keyBytes)]; ok { + return fldIdx, true + } + if caseInsensitive { + return findFieldCaseInsensitive(structType.fields, string(keyBytes)) + } + return -1, false +} + +// findFieldCaseInsensitive returns the index of the first field whose name +// case-insensitively matches key, or -1 and false if no field matches. +func findFieldCaseInsensitive(flds decodingFields, key string) (int, bool) { + keyLen := len(key) + for i, f := range flds { + if f.keyAsInt { + continue + } + if len(f.name) == keyLen && strings.EqualFold(f.name, key) { + return i, true + } + } + return -1, false +} + +// handleUnmatchedMapKey handles a map entry whose key does not match any struct +// field. It can return UnknownFieldError or DupMapKeyError. +// handleUnmatchedMapKey consumes the CBOR value, so the caller doesn't need to skip any values. +// If an error is returned, the caller should abort parsing the map and return the error. +// If no error is returned, the caller should continue to process the next map pair. 
+func handleUnmatchedMapKey( + d *decoder, + key any, + i int, + count int, + hasSize bool, + // *map[any]struct{} is used here because we use lazy initialization for uks + uks *map[any]struct{}, //nolint:gocritic +) error { + errOnUnknownField := (d.dm.extraReturnErrors & ExtraDecErrorUnknownField) > 0 + + if errOnUnknownField { + return d.skipMapForUnknownField(i, count, hasSize) + } + + if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + if *uks == nil { + *uks = make(map[any]struct{}) + } + if _, dup := (*uks)[key]; dup { + return d.skipMapForDupKey(key, i, count, hasSize) + } + (*uks)[key] = struct{}{} + } + + // Skip value. + d.skip() + return nil +} diff --git a/openshift/tests-extension/vendor/github.com/fxamacker/cbor/v2/diagnose.go b/openshift/tests-extension/vendor/github.com/fxamacker/cbor/v2/diagnose.go index 44afb8660..42a67ad11 100644 --- a/openshift/tests-extension/vendor/github.com/fxamacker/cbor/v2/diagnose.go +++ b/openshift/tests-extension/vendor/github.com/fxamacker/cbor/v2/diagnose.go @@ -51,11 +51,8 @@ const ( maxByteStringEncoding ) -func (bse ByteStringEncoding) valid() error { - if bse >= maxByteStringEncoding { - return errors.New("cbor: invalid ByteStringEncoding " + strconv.Itoa(int(bse))) - } - return nil +func (bse ByteStringEncoding) valid() bool { + return bse < maxByteStringEncoding } // DiagOptions specifies Diag options. 
@@ -104,8 +101,8 @@ func (opts DiagOptions) DiagMode() (DiagMode, error) { } func (opts DiagOptions) diagMode() (*diagMode, error) { - if err := opts.ByteStringEncoding.valid(); err != nil { - return nil, err + if !opts.ByteStringEncoding.valid() { + return nil, errors.New("cbor: invalid ByteStringEncoding " + strconv.Itoa(int(opts.ByteStringEncoding))) } decMode, err := DecOptions{ @@ -360,7 +357,7 @@ func (di *diagnose) item() error { //nolint:gocyclo case cborTypeArray: _, _, val := di.d.getHead() - count := int(val) + count := int(val) //nolint:gosec di.w.WriteByte('[') for i := 0; i < count; i++ { @@ -376,7 +373,7 @@ func (di *diagnose) item() error { //nolint:gocyclo case cborTypeMap: _, _, val := di.d.getHead() - count := int(val) + count := int(val) //nolint:gosec di.w.WriteByte('{') for i := 0; i < count; i++ { @@ -477,8 +474,8 @@ func (di *diagnose) item() error { //nolint:gocyclo func (di *diagnose) writeU16(val rune) { di.w.WriteString("\\u") var in [2]byte - in[0] = byte(val >> 8) - in[1] = byte(val) + in[0] = byte(val >> 8) //nolint:gosec + in[1] = byte(val) //nolint:gosec sz := hex.EncodedLen(len(in)) di.w.Grow(sz) dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz] @@ -608,7 +605,7 @@ func (di *diagnose) encodeTextString(val string, quote byte) error { c, size := utf8.DecodeRuneInString(val[i:]) switch { - case c == utf8.RuneError: + case c == utf8.RuneError && size == 1: return &SemanticError{"cbor: invalid UTF-8 string"} case c < utf16SurrSelf: @@ -631,7 +628,7 @@ func (di *diagnose) encodeFloat(ai byte, val uint64) error { f64 := float64(0) switch ai { case additionalInformationAsFloat16: - f16 := float16.Frombits(uint16(val)) + f16 := float16.Frombits(uint16(val)) //nolint:gosec switch { case f16.IsNaN(): di.w.WriteString("NaN") @@ -647,7 +644,7 @@ func (di *diagnose) encodeFloat(ai byte, val uint64) error { } case additionalInformationAsFloat32: - f32 := math.Float32frombits(uint32(val)) + f32 := math.Float32frombits(uint32(val)) //nolint:gosec 
switch { case f32 != f32: di.w.WriteString("NaN") diff --git a/openshift/tests-extension/vendor/github.com/fxamacker/cbor/v2/encode.go b/openshift/tests-extension/vendor/github.com/fxamacker/cbor/v2/encode.go index c550617c3..e65a29d8a 100644 --- a/openshift/tests-extension/vendor/github.com/fxamacker/cbor/v2/encode.go +++ b/openshift/tests-extension/vendor/github.com/fxamacker/cbor/v2/encode.go @@ -30,7 +30,7 @@ import ( // If value implements the Marshaler interface, Marshal calls its // MarshalCBOR method. // -// If value implements encoding.BinaryMarshaler, Marhsal calls its +// If value implements encoding.BinaryMarshaler, Marshal calls its // MarshalBinary method and encode it as CBOR byte string. // // Boolean values encode as CBOR booleans (type 7). @@ -343,7 +343,7 @@ const ( // non-UTC timezone then a "localtime - UTC" numeric offset will be included as specified in RFC3339. // NOTE: User applications can avoid including the RFC3339 numeric offset by: // - providing a time.Time value set to UTC, or - // - using the TimeUnix, TimeUnixMicro, or TimeUnixDynamic option instead of TimeRFC3339. + // - using the TimeUnix, TimeUnixMicro, TimeUnixDynamic, or TimeRFC3339NanoUTC option. TimeRFC3339 // TimeRFC3339Nano causes time.Time to encode to a CBOR time (tag 0) with a text string content @@ -351,9 +351,13 @@ const ( // non-UTC timezone then a "localtime - UTC" numeric offset will be included as specified in RFC3339. // NOTE: User applications can avoid including the RFC3339 numeric offset by: // - providing a time.Time value set to UTC, or - // - using the TimeUnix, TimeUnixMicro, or TimeUnixDynamic option instead of TimeRFC3339Nano. + // - using the TimeUnix, TimeUnixMicro, TimeUnixDynamic, or TimeRFC3339NanoUTC option. TimeRFC3339Nano + // TimeRFC3339NanoUTC causes time.Time to encode to a CBOR time (tag 0) with a text string content + // representing UTC time using nanosecond precision in RFC3339 format. 
+ TimeRFC3339NanoUTC + maxTimeMode ) @@ -436,7 +440,7 @@ const ( // FieldNameToTextString encodes struct fields to CBOR text string (major type 3). FieldNameToTextString FieldNameMode = iota - // FieldNameToTextString encodes struct fields to CBOR byte string (major type 2). + // FieldNameToByteString encodes struct fields to CBOR byte string (major type 2). FieldNameToByteString maxFieldNameMode @@ -567,7 +571,7 @@ type EncOptions struct { // RFC3339 format gets tag number 0, and numeric epoch time tag number 1. TimeTag EncTagMode - // IndefLength specifies whether to allow indefinite length CBOR items. + // IndefLength specifies whether to allow indefinite-length CBOR items. IndefLength IndefLengthMode // NilContainers specifies how to encode nil slices and maps. @@ -1132,10 +1136,11 @@ func encodeFloat(e *bytes.Buffer, em *encMode, v reflect.Value) error { if fopt == ShortestFloat16 { var f16 float16.Float16 p := float16.PrecisionFromfloat32(f32) - if p == float16.PrecisionExact { + switch p { + case float16.PrecisionExact: // Roundtrip float32->float16->float32 test isn't needed. f16 = float16.Fromfloat32(f32) - } else if p == float16.PrecisionUnknown { + case float16.PrecisionUnknown: // Try roundtrip float32->float16->float32 to determine if float32 can fit into float16. 
f16 = float16.Fromfloat32(f32) if f16.Float32() == f32 { @@ -1293,10 +1298,10 @@ func encodeByteString(e *bytes.Buffer, em *encMode, v reflect.Value) error { if slen == 0 { return e.WriteByte(byte(cborTypeByteString)) } - encodeHead(e, byte(cborTypeByteString), uint64(slen)) + encodeHead(e, byte(cborTypeByteString), uint64(slen)) //nolint:gosec if vk == reflect.Array { for i := 0; i < slen; i++ { - e.WriteByte(byte(v.Index(i).Uint())) + e.WriteByte(byte(v.Index(i).Uint())) //nolint:gosec } return nil } @@ -1333,7 +1338,7 @@ func (ae arrayEncodeFunc) encode(e *bytes.Buffer, em *encMode, v reflect.Value) if alen == 0 { return e.WriteByte(byte(cborTypeArray)) } - encodeHead(e, byte(cborTypeArray), uint64(alen)) + encodeHead(e, byte(cborTypeArray), uint64(alen)) //nolint:gosec for i := 0; i < alen; i++ { if err := ae.f(e, em, v.Index(i)); err != nil { return err @@ -1364,7 +1369,7 @@ func (me mapEncodeFunc) encode(e *bytes.Buffer, em *encMode, v reflect.Value) er return e.WriteByte(byte(cborTypeMap)) } - encodeHead(e, byte(cborTypeMap), uint64(mlen)) + encodeHead(e, byte(cborTypeMap), uint64(mlen)) //nolint:gosec if em.sort == SortNone || em.sort == SortFastShuffle || mlen <= 1 { return me.e(e, em, v, nil) } @@ -1427,7 +1432,7 @@ func (x *bytewiseKeyValueSorter) Swap(i, j int) { func (x *bytewiseKeyValueSorter) Less(i, j int) bool { kvi, kvj := x.kvs[i], x.kvs[j] - return bytes.Compare(x.data[kvi.offset:kvi.valueOffset], x.data[kvj.offset:kvj.valueOffset]) <= 0 + return bytes.Compare(x.data[kvi.offset:kvi.valueOffset], x.data[kvj.offset:kvj.valueOffset]) < 0 } type lengthFirstKeyValueSorter struct { @@ -1448,7 +1453,7 @@ func (x *lengthFirstKeyValueSorter) Less(i, j int) bool { if keyLengthDifference := (kvi.valueOffset - kvi.offset) - (kvj.valueOffset - kvj.offset); keyLengthDifference != 0 { return keyLengthDifference < 0 } - return bytes.Compare(x.data[kvi.offset:kvi.valueOffset], x.data[kvj.offset:kvj.valueOffset]) <= 0 + return 
bytes.Compare(x.data[kvi.offset:kvi.valueOffset], x.data[kvj.offset:kvj.valueOffset]) < 0 } var keyValuePool = sync.Pool{} @@ -1535,8 +1540,8 @@ func encodeStruct(e *bytes.Buffer, em *encMode, v reflect.Value) (err error) { // Head is rewritten later if actual encoded field count is different from struct field count. encodedHeadLen := encodeHead(e, byte(cborTypeMap), uint64(len(flds))) - kvbegin := e.Len() - kvcount := 0 + kvBeginOffset := e.Len() + kvCount := 0 for offset := 0; offset < len(flds); offset++ { f := flds[(start+offset)%len(flds)] @@ -1582,10 +1587,10 @@ func encodeStruct(e *bytes.Buffer, em *encMode, v reflect.Value) (err error) { return err } - kvcount++ + kvCount++ } - if len(flds) == kvcount { + if len(flds) == kvCount { // Encoded element count in head is the same as actual element count. return nil } @@ -1593,8 +1598,8 @@ func encodeStruct(e *bytes.Buffer, em *encMode, v reflect.Value) (err error) { // Overwrite the bytes that were reserved for the head before encoding the map entries. var actualHeadLen int { - headbuf := *bytes.NewBuffer(e.Bytes()[kvbegin-encodedHeadLen : kvbegin-encodedHeadLen : kvbegin]) - actualHeadLen = encodeHead(&headbuf, byte(cborTypeMap), uint64(kvcount)) + headbuf := *bytes.NewBuffer(e.Bytes()[kvBeginOffset-encodedHeadLen : kvBeginOffset-encodedHeadLen : kvBeginOffset]) + actualHeadLen = encodeHead(&headbuf, byte(cborTypeMap), uint64(kvCount)) } if actualHeadLen == encodedHeadLen { @@ -1607,8 +1612,8 @@ func encodeStruct(e *bytes.Buffer, em *encMode, v reflect.Value) (err error) { // encoded. The encoded entries are offset to the right by the number of excess reserved // bytes. Shift the entries left to remove the gap. 
excessReservedBytes := encodedHeadLen - actualHeadLen - dst := e.Bytes()[kvbegin-excessReservedBytes : e.Len()-excessReservedBytes] - src := e.Bytes()[kvbegin:e.Len()] + dst := e.Bytes()[kvBeginOffset-excessReservedBytes : e.Len()-excessReservedBytes] + src := e.Bytes()[kvBeginOffset:e.Len()] copy(dst, src) // After shifting, the excess bytes are at the end of the output buffer and they are @@ -1633,7 +1638,7 @@ func encodeTime(e *bytes.Buffer, em *encMode, v reflect.Value) error { } if em.timeTag == EncTagRequired { tagNumber := 1 - if em.time == TimeRFC3339 || em.time == TimeRFC3339Nano { + if em.time == TimeRFC3339 || em.time == TimeRFC3339Nano || em.time == TimeRFC3339NanoUTC { tagNumber = 0 } encodeHead(e, byte(cborTypeTag), uint64(tagNumber)) @@ -1650,7 +1655,7 @@ func encodeTime(e *bytes.Buffer, em *encMode, v reflect.Value) error { case TimeUnixDynamic: t = t.UTC().Round(time.Microsecond) - secs, nsecs := t.Unix(), uint64(t.Nanosecond()) + secs, nsecs := t.Unix(), uint64(t.Nanosecond()) //nolint:gosec if nsecs == 0 { return encodeInt(e, em, reflect.ValueOf(secs)) } @@ -1661,6 +1666,10 @@ func encodeTime(e *bytes.Buffer, em *encMode, v reflect.Value) error { s := t.Format(time.RFC3339) return encodeString(e, em, reflect.ValueOf(s)) + case TimeRFC3339NanoUTC: + s := t.UTC().Format(time.RFC3339Nano) + return encodeString(e, em, reflect.ValueOf(s)) + default: // TimeRFC3339Nano s := t.Format(time.RFC3339Nano) return encodeString(e, em, reflect.ValueOf(s)) diff --git a/openshift/tests-extension/vendor/github.com/fxamacker/cbor/v2/simplevalue.go b/openshift/tests-extension/vendor/github.com/fxamacker/cbor/v2/simplevalue.go index 30f72814f..9912e855c 100644 --- a/openshift/tests-extension/vendor/github.com/fxamacker/cbor/v2/simplevalue.go +++ b/openshift/tests-extension/vendor/github.com/fxamacker/cbor/v2/simplevalue.go @@ -93,6 +93,6 @@ func (sv *SimpleValue) unmarshalCBOR(data []byte) error { // It is safe to cast val to uint8 here because // - data is already 
verified to be well-formed CBOR simple value and // - val is <= math.MaxUint8. - *sv = SimpleValue(val) + *sv = SimpleValue(val) //nolint:gosec return nil } diff --git a/openshift/tests-extension/vendor/github.com/fxamacker/cbor/v2/stream.go b/openshift/tests-extension/vendor/github.com/fxamacker/cbor/v2/stream.go index 7ac6d7d67..da4c8f210 100644 --- a/openshift/tests-extension/vendor/github.com/fxamacker/cbor/v2/stream.go +++ b/openshift/tests-extension/vendor/github.com/fxamacker/cbor/v2/stream.go @@ -171,14 +171,20 @@ func NewEncoder(w io.Writer) *Encoder { // Encode writes the CBOR encoding of v. func (enc *Encoder) Encode(v any) error { - if len(enc.indefTypes) > 0 && v != nil { - indefType := enc.indefTypes[len(enc.indefTypes)-1] - if indefType == cborTypeTextString { + if len(enc.indefTypes) > 0 { + switch enc.indefTypes[len(enc.indefTypes)-1] { + case cborTypeTextString: + if v == nil { + return errors.New("cbor: cannot encode nil for indefinite-length text string") + } k := reflect.TypeOf(v).Kind() if k != reflect.String { return errors.New("cbor: cannot encode item type " + k.String() + " for indefinite-length text string") } - } else if indefType == cborTypeByteString { + case cborTypeByteString: + if v == nil { + return errors.New("cbor: cannot encode nil for indefinite-length byte string") + } t := reflect.TypeOf(v) k := t.Kind() if (k != reflect.Array && k != reflect.Slice) || t.Elem().Kind() != reflect.Uint8 { @@ -198,35 +204,35 @@ func (enc *Encoder) Encode(v any) error { return err } -// StartIndefiniteByteString starts byte string encoding of indefinite length. +// StartIndefiniteByteString starts indefinite-length byte string encoding. // Subsequent calls of (*Encoder).Encode() encodes definite length byte strings // ("chunks") as one contiguous string until EndIndefinite is called. 
func (enc *Encoder) StartIndefiniteByteString() error { return enc.startIndefinite(cborTypeByteString) } -// StartIndefiniteTextString starts text string encoding of indefinite length. +// StartIndefiniteTextString starts indefinite-length text string encoding. // Subsequent calls of (*Encoder).Encode() encodes definite length text strings // ("chunks") as one contiguous string until EndIndefinite is called. func (enc *Encoder) StartIndefiniteTextString() error { return enc.startIndefinite(cborTypeTextString) } -// StartIndefiniteArray starts array encoding of indefinite length. +// StartIndefiniteArray starts indefinite-length array encoding. // Subsequent calls of (*Encoder).Encode() encodes elements of the array // until EndIndefinite is called. func (enc *Encoder) StartIndefiniteArray() error { return enc.startIndefinite(cborTypeArray) } -// StartIndefiniteMap starts array encoding of indefinite length. +// StartIndefiniteMap starts indefinite-length map encoding. // Subsequent calls of (*Encoder).Encode() encodes elements of the map // until EndIndefinite is called. func (enc *Encoder) StartIndefiniteMap() error { return enc.startIndefinite(cborTypeMap) } -// EndIndefinite closes last opened indefinite length value. +// EndIndefinite closes last opened indefinite-length value. 
func (enc *Encoder) EndIndefinite() error { if len(enc.indefTypes) == 0 { return errors.New("cbor: cannot encode \"break\" code outside indefinite length values") @@ -238,18 +244,22 @@ func (enc *Encoder) EndIndefinite() error { return err } -var cborIndefHeader = map[cborType][]byte{ - cborTypeByteString: {cborByteStringWithIndefiniteLengthHead}, - cborTypeTextString: {cborTextStringWithIndefiniteLengthHead}, - cborTypeArray: {cborArrayWithIndefiniteLengthHead}, - cborTypeMap: {cborMapWithIndefiniteLengthHead}, -} - func (enc *Encoder) startIndefinite(typ cborType) error { if enc.em.indefLength == IndefLengthForbidden { return &IndefiniteLengthError{typ} } - _, err := enc.w.Write(cborIndefHeader[typ]) + var head byte + switch typ { + case cborTypeByteString: + head = cborByteStringWithIndefiniteLengthHead + case cborTypeTextString: + head = cborTextStringWithIndefiniteLengthHead + case cborTypeArray: + head = cborArrayWithIndefiniteLengthHead + case cborTypeMap: + head = cborMapWithIndefiniteLengthHead + } + _, err := enc.w.Write([]byte{head}) if err == nil { enc.indefTypes = append(enc.indefTypes, typ) } @@ -262,7 +272,9 @@ type RawMessage []byte // MarshalCBOR returns m or CBOR nil if m is nil. func (m RawMessage) MarshalCBOR() ([]byte, error) { if len(m) == 0 { - return cborNil, nil + b := make([]byte, len(cborNil)) + copy(b, cborNil) + return b, nil } return m, nil } diff --git a/openshift/tests-extension/vendor/github.com/fxamacker/cbor/v2/structfields.go b/openshift/tests-extension/vendor/github.com/fxamacker/cbor/v2/structfields.go index cf0a922cd..b2d71f2e9 100644 --- a/openshift/tests-extension/vendor/github.com/fxamacker/cbor/v2/structfields.go +++ b/openshift/tests-extension/vendor/github.com/fxamacker/cbor/v2/structfields.go @@ -6,27 +6,43 @@ package cbor import ( "reflect" "sort" + "strconv" "strings" ) +// field holds shared struct field metadata returned by getFields(). 
type field struct { - name string - nameAsInt int64 // used to decoder to match field name with CBOR int + name string + nameAsInt int64 // used to match field name with CBOR int + idx []int + typ reflect.Type // used during cache building only + keyAsInt bool // used to encode/decode field name as int + tagged bool // used to choose dominant field (at the same level tagged fields dominate untagged fields) + omitEmpty bool // used to skip empty field + omitZero bool // used to skip zero field +} + +type fields []*field + +// encodingField extends field with encoding-specific data. +type encodingField struct { + field cborName []byte - cborNameByteString []byte // major type 2 name encoding iff cborName has major type 3 - idx []int - typ reflect.Type + cborNameByteString []byte // major type 2 name encoding if cborName has major type 3 ef encodeFunc ief isEmptyFunc izf isZeroFunc - typInfo *typeInfo // used to decoder to reuse type info - tagged bool // used to choose dominant field (at the same level tagged fields dominate untagged fields) - omitEmpty bool // used to skip empty field - omitZero bool // used to skip zero field - keyAsInt bool // used to encode/decode field name as int } -type fields []*field +type encodingFields []*encodingField + +// decodingField extends field with decoding-specific data. +type decodingField struct { + field + typInfo *typeInfo // used by decoder to reuse type info +} + +type decodingFields []*decodingField // indexFieldSorter sorts fields by field idx at each level, breaking ties with idx depth. type indexFieldSorter struct { @@ -48,7 +64,7 @@ func (x *indexFieldSorter) Less(i, j int) bool { return iIdx[k] < jIdx[k] } } - return len(iIdx) <= len(jIdx) + return len(iIdx) < len(jIdx) } // nameLevelAndTagFieldSorter sorts fields by field name, idx depth, and presence of tag. 
@@ -69,6 +85,10 @@ func (x *nameLevelAndTagFieldSorter) Less(i, j int) bool { if fi.name != fj.name { return fi.name < fj.name } + // Fields with the same name but different keyAsInt are in separate namespaces. + if fi.keyAsInt != fj.keyAsInt { + return fi.keyAsInt + } if len(fi.idx) != len(fj.idx) { return len(fi.idx) < len(fj.idx) } @@ -117,22 +137,37 @@ func getFields(t reflect.Type) (flds fields, structOptions string) { } } + // Normalize keyasint field names to their canonical integer string form. + // This ensures that "01", "+1", and "1" are treated as the same key + // during deduplication. + for _, f := range flds { + if f.keyAsInt { + nameAsInt, err := strconv.Atoi(f.name) + if err != nil { + continue // Leave invalid names for callers to report. + } + f.nameAsInt = int64(nameAsInt) + f.name = strconv.Itoa(nameAsInt) + } + } + sort.Sort(&nameLevelAndTagFieldSorter{flds}) // Keep visible fields. j := 0 // index of next unique field for i := 0; i < len(flds); { name := flds[i].name + keyAsInt := flds[i].keyAsInt if i == len(flds)-1 || // last field - name != flds[i+1].name || // field i has unique field name + name != flds[i+1].name || flds[i+1].keyAsInt != keyAsInt || // field i has unique (name, keyAsInt) len(flds[i].idx) < len(flds[i+1].idx) || // field i is at a less nested level than field i+1 (flds[i].tagged && !flds[i+1].tagged) { // field i is tagged while field i+1 is not flds[j] = flds[i] j++ } - // Skip fields with the same field name. - for i++; i < len(flds) && name == flds[i].name; i++ { //nolint:revive + // Skip fields with the same (name, keyAsInt). 
+ for i++; i < len(flds) && name == flds[i].name && keyAsInt == flds[i].keyAsInt; i++ { //nolint:revive } } if j != len(flds) { diff --git a/openshift/tests-extension/vendor/github.com/fxamacker/cbor/v2/valid.go b/openshift/tests-extension/vendor/github.com/fxamacker/cbor/v2/valid.go index b40793b95..850b95019 100644 --- a/openshift/tests-extension/vendor/github.com/fxamacker/cbor/v2/valid.go +++ b/openshift/tests-extension/vendor/github.com/fxamacker/cbor/v2/valid.go @@ -54,7 +54,7 @@ func (e *MaxMapPairsError) Error() string { return "cbor: exceeded max number of key-value pairs " + strconv.Itoa(e.maxMapPairs) + " for CBOR map" } -// IndefiniteLengthError indicates found disallowed indefinite length items. +// IndefiniteLengthError indicates found disallowed indefinite-length items. type IndefiniteLengthError struct { t cborType } @@ -113,7 +113,7 @@ func (d *decoder) wellformedInternal(depth int, checkBuiltinTags bool) (int, err } return d.wellformedIndefiniteString(t, depth, checkBuiltinTags) } - valInt := int(val) + valInt := int(val) //nolint:gosec if valInt < 0 { // Detect integer overflow return 0, errors.New("cbor: " + t.String() + " length " + strconv.FormatUint(val, 10) + " is too large, causing integer overflow") @@ -136,7 +136,7 @@ func (d *decoder) wellformedInternal(depth int, checkBuiltinTags bool) (int, err return d.wellformedIndefiniteArrayOrMap(t, depth, checkBuiltinTags) } - valInt := int(val) + valInt := int(val) //nolint:gosec if valInt < 0 { // Detect integer overflow return 0, errors.New("cbor: " + t.String() + " length " + strconv.FormatUint(val, 10) + " is too large, it would cause integer overflow") @@ -212,7 +212,7 @@ func (d *decoder) wellformedInternal(depth int, checkBuiltinTags bool) (int, err return depth, nil } -// wellformedIndefiniteString checks indefinite length byte/text string's well-formedness and returns max depth and error. 
+// wellformedIndefiniteString checks indefinite-length byte/text string's well-formedness and returns max depth and error. func (d *decoder) wellformedIndefiniteString(t cborType, depth int, checkBuiltinTags bool) (int, error) { var err error for { @@ -223,7 +223,7 @@ func (d *decoder) wellformedIndefiniteString(t cborType, depth int, checkBuiltin d.off++ break } - // Peek ahead to get next type and indefinite length status. + // Peek ahead to get next type and indefinite-length status. nt, ai := parseInitialByte(d.data[d.off]) if t != nt { return 0, &SyntaxError{"cbor: wrong element type " + nt.String() + " for indefinite-length " + t.String()} @@ -238,7 +238,7 @@ func (d *decoder) wellformedIndefiniteString(t cborType, depth int, checkBuiltin return depth, nil } -// wellformedIndefiniteArrayOrMap checks indefinite length array/map's well-formedness and returns max depth and error. +// wellformedIndefiniteArrayOrMap checks indefinite-length array/map's well-formedness and returns max depth and error. 
func (d *decoder) wellformedIndefiniteArrayOrMap(t cborType, depth int, checkBuiltinTags bool) (int, error) { var err error maxDepth := depth @@ -326,7 +326,7 @@ func (d *decoder) wellformedHead() (t cborType, ai byte, val uint64, err error) val = uint64(binary.BigEndian.Uint16(d.data[d.off : d.off+argumentSize])) d.off += argumentSize if t == cborTypePrimitives { - if err := d.acceptableFloat(float64(float16.Frombits(uint16(val)).Float32())); err != nil { + if err := d.acceptableFloat(float64(float16.Frombits(uint16(val)).Float32())); err != nil { //nolint:gosec return 0, 0, 0, err } } @@ -341,7 +341,7 @@ func (d *decoder) wellformedHead() (t cborType, ai byte, val uint64, err error) val = uint64(binary.BigEndian.Uint32(d.data[d.off : d.off+argumentSize])) d.off += argumentSize if t == cborTypePrimitives { - if err := d.acceptableFloat(float64(math.Float32frombits(uint32(val)))); err != nil { + if err := d.acceptableFloat(float64(math.Float32frombits(uint32(val)))); err != nil { //nolint:gosec return 0, 0, 0, err } } @@ -379,12 +379,12 @@ func (d *decoder) wellformedHead() (t cborType, ai byte, val uint64, err error) func (d *decoder) acceptableFloat(f float64) error { switch { - case d.dm.nanDec == NaNDecodeForbidden && math.IsNaN(f): + case d.dm.nan == NaNDecodeForbidden && math.IsNaN(f): return &UnacceptableDataItemError{ CBORType: cborTypePrimitives.String(), Message: "floating-point NaN", } - case d.dm.infDec == InfDecodeForbidden && math.IsInf(f, 0): + case d.dm.inf == InfDecodeForbidden && math.IsInf(f, 0): return &UnacceptableDataItemError{ CBORType: cborTypePrimitives.String(), Message: "floating-point infinity", diff --git a/openshift/tests-extension/vendor/github.com/operator-framework/operator-controller/api/v1/clusterobjectset_types.go b/openshift/tests-extension/vendor/github.com/operator-framework/operator-controller/api/v1/clusterobjectset_types.go index ed1a4001e..d7e755992 100644 --- 
a/openshift/tests-extension/vendor/github.com/operator-framework/operator-controller/api/v1/clusterobjectset_types.go +++ b/openshift/tests-extension/vendor/github.com/operator-framework/operator-controller/api/v1/clusterobjectset_types.go @@ -510,6 +510,39 @@ type ClusterObjectSetStatus struct { // +listMapKey=type // +optional Conditions []metav1.Condition `json:"conditions,omitempty"` + + // observedPhases records the content hashes of resolved phases + // at first successful reconciliation. This is used to detect if + // referenced object sources were deleted and recreated with + // different content. Each entry covers all fully-resolved object + // manifests within a phase, making it source-agnostic. + // + // +kubebuilder:validation:XValidation:rule="self == oldSelf || oldSelf.size() == 0",message="observedPhases is immutable" + // +kubebuilder:validation:MaxItems=20 + // +listType=map + // +listMapKey=name + // +optional + ObservedPhases []ObservedPhase `json:"observedPhases,omitempty"` +} + +// ObservedPhase records the observed content digest of a resolved phase. +type ObservedPhase struct { + // name is the phase name matching a phase in spec.phases. + // + // +required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=63 + // +kubebuilder:validation:XValidation:rule=`!format.dns1123Label().validate(self).hasValue()`,message="the value must consist of only lowercase alphanumeric characters and hyphens, and must start with an alphabetic character and end with an alphanumeric character." + Name string `json:"name"` + + // digest is the digest of the phase's resolved object content + // at first successful resolution, in the format "<algorithm>:<hex>". 
+ // + // +required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=256 + // +kubebuilder:validation:XValidation:rule=`self.matches('^[a-z0-9]+:[a-f0-9]+$')`,message="digest must be in the format '<algorithm>:<hex>'" + Digest string `json:"digest"` } // +genclient diff --git a/openshift/tests-extension/vendor/github.com/operator-framework/operator-controller/api/v1/zz_generated.deepcopy.go b/openshift/tests-extension/vendor/github.com/operator-framework/operator-controller/api/v1/zz_generated.deepcopy.go index 8d1cea024..384439787 100644 --- a/openshift/tests-extension/vendor/github.com/operator-framework/operator-controller/api/v1/zz_generated.deepcopy.go +++ b/openshift/tests-extension/vendor/github.com/operator-framework/operator-controller/api/v1/zz_generated.deepcopy.go @@ -555,6 +555,11 @@ func (in *ClusterObjectSetStatus) DeepCopyInto(out *ClusterObjectSetStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.ObservedPhases != nil { + in, out := &in.ObservedPhases, &out.ObservedPhases + *out = make([]ObservedPhase, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterObjectSetStatus. @@ -664,6 +669,21 @@ func (in *ObjectSourceRef) DeepCopy() *ObjectSourceRef { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObservedPhase) DeepCopyInto(out *ObservedPhase) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObservedPhase. +func (in *ObservedPhase) DeepCopy() *ObservedPhase { + if in == nil { + return nil + } + out := new(ObservedPhase) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PreflightConfig) DeepCopyInto(out *PreflightConfig) { *out = *in diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/.golangci.yml b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/.golangci.yml index d12c8920a..db1f55101 100644 --- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -17,6 +17,7 @@ linters: - ineffassign - misspell - modernize + - noctx - perfsprint - revive - staticcheck @@ -88,6 +89,16 @@ linters: deny: - pkg: go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal desc: Do not use cross-module internal packages. + semconv: + list-mode: lax + files: + - "!**/semconv/**" + - "!**/exporters/zipkin/**" + deny: + - pkg: go.opentelemetry.io/otel/semconv + desc: "Use go.opentelemetry.io/otel/semconv/v1.40.0 instead. If a newer semconv version has been released, update the depguard rule." + allow: + - go.opentelemetry.io/otel/semconv/v1.40.0 gocritic: disabled-checks: - appendAssign diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/CHANGELOG.md index ab7263014..20edda441 100644 --- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -11,6 +11,49 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm +## [1.43.0/0.65.0/0.19.0] 2026-04-02 + +### Added + +- Add `IsRandom` and `WithRandom` on `TraceFlags`, and `IsRandom` on `SpanContext` in `go.opentelemetry.io/otel/trace` for [W3C Trace Context Level 2 Random Trace ID Flag](https://www.w3.org/TR/trace-context-2/#random-trace-id-flag) support. (#8012) +- Add service detection with `WithService` in `go.opentelemetry.io/otel/sdk/resource`. 
(#7642) +- Add `DefaultWithContext` and `EnvironmentWithContext` in `go.opentelemetry.io/otel/sdk/resource` to support plumbing `context.Context` through default and environment detectors. (#8051) +- Support attributes with empty value (`attribute.EMPTY`) in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#8038) +- Support attributes with empty value (`attribute.EMPTY`) in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#8038) +- Support attributes with empty value (`attribute.EMPTY`) in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#8038) +- Support attributes with empty value (`attribute.EMPTY`) in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#8038) +- Support attributes with empty value (`attribute.EMPTY`) in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#8038) +- Support attributes with empty value (`attribute.EMPTY`) in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#8038) +- Support attributes with empty value (`attribute.EMPTY`) in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest`. (#8038) +- Add support for per-series start time tracking for cumulative metrics in `go.opentelemetry.io/otel/sdk/metric`. + Set `OTEL_GO_X_PER_SERIES_START_TIMESTAMPS=true` to enable. (#8060) +- Add `WithCardinalityLimitSelector` for metric reader for configuring cardinality limits specific to the instrument kind. (#7855) + +### Changed + +- Introduce the `EMPTY` Type in `go.opentelemetry.io/otel/attribute` to reflect that an empty value is now a valid value, with `INVALID` remaining as a deprecated alias of `EMPTY`. (#8038) +- Improve slice handling in `go.opentelemetry.io/otel/attribute` to optimize short slice values with fixed-size fast paths. (#8039) +- Improve performance of span metric recording in `go.opentelemetry.io/otel/sdk/trace` by returning early if self-observability is not enabled. 
(#8067) +- Improve formatting of metric data diffs in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest`. (#8073) + +### Deprecated + +- Deprecate `INVALID` in `go.opentelemetry.io/otel/attribute`. Use `EMPTY` instead. (#8038) + +### Fixed + +- Return spec-compliant `TraceIdRatioBased` description. This is a breaking behavioral change, but it is necessary to + make the implementation [spec-compliant](https://opentelemetry.io/docs/specs/otel/trace/sdk/#traceidratiobased). (#8027) +- Fix a race condition in `go.opentelemetry.io/otel/sdk/metric` where the lastvalue aggregation could collect the value 0 even when no zero-value measurements were recorded. (#8056) +- Limit HTTP response body to 4 MiB in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` to mitigate excessive memory usage caused by a misconfigured or malicious server. + Responses exceeding the limit are treated as non-retryable errors. (#8108) +- Limit HTTP response body to 4 MiB in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` to mitigate excessive memory usage caused by a misconfigured or malicious server. + Responses exceeding the limit are treated as non-retryable errors. (#8108) +- Limit HTTP response body to 4 MiB in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` to mitigate excessive memory usage caused by a misconfigured or malicious server. + Responses exceeding the limit are treated as non-retryable errors. (#8108) +- `WithHostID` detector in `go.opentelemetry.io/otel/sdk/resource` to use full path for `kenv` command on BSD. (#8113) +- Fix missing `request.GetBody` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` to correctly handle HTTP2 GOAWAY frame. (#8096) + ## [1.42.0/0.64.0/0.18.0/0.0.16] 2026-03-06 ### Added @@ -3576,7 +3619,8 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. 
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.42.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.43.0...HEAD +[1.43.0/0.65.0/0.19.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.43.0 [1.42.0/0.64.0/0.18.0/0.0.16]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.42.0 [1.41.0/0.63.0/0.17.0/0.0.15]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.41.0 [1.40.0/0.62.0/0.16.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.40.0 diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/Makefile b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/Makefile index 9f6e6b511..42466f2d6 100644 --- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/Makefile +++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/Makefile @@ -38,10 +38,14 @@ CROSSLINK = $(TOOLS)/crosslink $(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink SEMCONVKIT = $(TOOLS)/semconvkit +SEMCONVKIT_FILES := $(sort $(shell find $(TOOLS_MOD_DIR)/semconvkit -type f)) $(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit +$(TOOLS)/semconvkit: $(SEMCONVKIT_FILES) VERIFYREADMES = $(TOOLS)/verifyreadmes +VERIFYREADMES_FILES := $(sort $(shell find $(TOOLS_MOD_DIR)/verifyreadmes -type f)) $(TOOLS)/verifyreadmes: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/verifyreadmes +$(TOOLS)/verifyreadmes: $(VERIFYREADMES_FILES) GOLANGCI_LINT = $(TOOLS)/golangci-lint $(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/v2/cmd/golangci-lint diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/RELEASING.md b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/RELEASING.md index 861756fd7..6aff7548c 100644 --- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/RELEASING.md @@ 
-4,7 +4,9 @@
 
 Create a `Version Release` issue to track the release process.
 
-## Semantic Convention Generation
+## Semantic Convention Upgrade
+
+### Semantic Convention Generation
 
 New versions of the [OpenTelemetry Semantic Conventions] mean new versions of the `semconv` package need to be generated.
 The `semconv-generate` make target is used for this.
@@ -22,6 +24,43 @@ make semconv-generate # Uses the exported TAG.
 
 This should create a new sub-package of [`semconv`](./semconv).
 Ensure things look correct before submitting a pull request to include the addition.
 
+The `CHANGELOG.md` should also be updated to reflect the new changes:
+
+```md
+- The `go.opentelemetry.io/otel/semconv/<version>` package. The package contains semantic conventions from the `<version>` version of the OpenTelemetry Semantic Conventions. See the [migration documentation](./semconv/<version>/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/<prior-version>`. (#PR_NUMBER)
+```
+
+> **Tip:** Change `<version>` to the release and prior version to match the changes
+
+### Update semconv imports
+
+Once the new semconv module has been generated, update all semconv imports throughout the codebase to reference the new version:
+
+```go
+// Before
+semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
+"go.opentelemetry.io/otel/semconv/v1.37.0/otelconv"
+
+
+// After
+semconv "go.opentelemetry.io/otel/semconv/v1.39.0"
+"go.opentelemetry.io/otel/semconv/v1.39.0/otelconv"
+```
+
+Once complete, run `make` to check for any compilation or test failures.
+
+#### Handling attribute changes
+
+Some semconv releases might add new attributes or impact attributes that are currently being used. Changes could stem from a simple renaming, to more complex changes like merging attributes and property values being changed.
+
+One should update the code to the new attributes that supersede the impacted ones, hence sticking to the semantic conventions.
However, legacy attributes might still be emitted in accordance to the `OTEL_SEMCONV_STABILITY_OPT_IN` environment variable. + +For an example on how such migration might have to be tracked and performed, see issue [#7806](https://github.com/open-telemetry/opentelemetry-go/issues/7806). + +### Go contrib linter update + +Update [.golangci.yml](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/.golangci.yml) in [opentelemetry-go-contrib](https://github.com/open-telemetry/opentelemetry-go-contrib/) to mandate the new semconv version. + ## Breaking changes validation You can run `make gorelease` which runs [gorelease](https://pkg.go.dev/golang.org/x/exp/cmd/gorelease) to ensure that there are no unwanted changes made in the public API. diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/attribute/encoder.go index 6cc1a1655..771dd69c5 100644 --- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/attribute/encoder.go +++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/attribute/encoder.go @@ -53,7 +53,7 @@ var ( _ Encoder = &defaultAttrEncoder{} // encoderIDCounter is for generating IDs for other attribute encoders. - encoderIDCounter uint64 + encoderIDCounter atomic.Uint64 defaultEncoderOnce sync.Once defaultEncoderID = NewEncoderID() @@ -64,7 +64,7 @@ var ( // once per each type of attribute encoder. Preferably in init() or in var // definition. 
func NewEncoderID() EncoderID { - return EncoderID{value: atomic.AddUint64(&encoderIDCounter, 1)} + return EncoderID{value: encoderIDCounter.Add(1)} } // DefaultEncoder returns an attribute encoder that encodes attributes in such diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/attribute/hash.go b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/attribute/hash.go index 6aa69aeae..b09caaa6d 100644 --- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/attribute/hash.go +++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/attribute/hash.go @@ -27,6 +27,7 @@ const ( int64SliceID uint64 = 3762322556277578591 // "_[]int64" (little endian) float64SliceID uint64 = 7308324551835016539 // "[]double" (little endian) stringSliceID uint64 = 7453010373645655387 // "[]string" (little endian) + emptyID uint64 = 7305809155345288421 // "__empty_" (little endian) ) // hashKVs returns a new xxHash64 hash of kvs. @@ -80,7 +81,8 @@ func hashKV(h xxhash.Hash, kv KeyValue) xxhash.Hash { for i := 0; i < rv.Len(); i++ { h = h.String(rv.Index(i).String()) } - case INVALID: + case EMPTY: + h = h.Uint64(emptyID) default: // Logging is an alternative, but using the internal logger here // causes an import cycle so it is not done. diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go index 7f5eae877..d9f51fa2d 100644 --- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go +++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go @@ -11,80 +11,63 @@ import ( "reflect" ) -// BoolSliceValue converts a bool slice into an array with same elements as slice. 
-func BoolSliceValue(v []bool) any { - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeFor[bool]())).Elem() - reflect.Copy(cp, reflect.ValueOf(v)) - return cp.Interface() +// sliceElem is the exact set of element types stored in attribute slice values. +// Using a closed set prevents accidental instantiations for unsupported types. +type sliceElem interface { + bool | int64 | float64 | string } -// Int64SliceValue converts an int64 slice into an array with same elements as slice. -func Int64SliceValue(v []int64) any { - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeFor[int64]())).Elem() - reflect.Copy(cp, reflect.ValueOf(v)) - return cp.Interface() -} - -// Float64SliceValue converts a float64 slice into an array with same elements as slice. -func Float64SliceValue(v []float64) any { - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeFor[float64]())).Elem() - reflect.Copy(cp, reflect.ValueOf(v)) - return cp.Interface() -} +// SliceValue converts a slice into an array with the same elements. +func SliceValue[T sliceElem](v []T) any { + // Keep only the common tiny-slice cases out of reflection. Extending this + // much further increases code size for diminishing benefit while larger + // slices still need the generic reflective path to preserve comparability. + // This matches the short lengths that show up most often in local + // benchmarks and semantic convention examples while leaving larger, less + // predictable slices on the generic reflective path. + switch len(v) { + case 0: + return [0]T{} + case 1: + return [1]T{v[0]} + case 2: + return [2]T{v[0], v[1]} + case 3: + return [3]T{v[0], v[1], v[2]} + } -// StringSliceValue converts a string slice into an array with same elements as slice. 
-func StringSliceValue(v []string) any { - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeFor[string]())).Elem() - reflect.Copy(cp, reflect.ValueOf(v)) - return cp.Interface() + return sliceValueReflect(v) } -// AsBoolSlice converts a bool array into a slice into with same elements as array. -func AsBoolSlice(v any) []bool { - rv := reflect.ValueOf(v) - if rv.Type().Kind() != reflect.Array { - return nil +// AsSlice converts an array into a slice with the same elements. +func AsSlice[T sliceElem](v any) []T { + // Mirror the small fixed-array fast path used by SliceValue. + switch a := v.(type) { + case [0]T: + return []T{} + case [1]T: + return []T{a[0]} + case [2]T: + return []T{a[0], a[1]} + case [3]T: + return []T{a[0], a[1], a[2]} } - cpy := make([]bool, rv.Len()) - if len(cpy) > 0 { - _ = reflect.Copy(reflect.ValueOf(cpy), rv) - } - return cpy -} -// AsInt64Slice converts an int64 array into a slice into with same elements as array. -func AsInt64Slice(v any) []int64 { - rv := reflect.ValueOf(v) - if rv.Type().Kind() != reflect.Array { - return nil - } - cpy := make([]int64, rv.Len()) - if len(cpy) > 0 { - _ = reflect.Copy(reflect.ValueOf(cpy), rv) - } - return cpy + return asSliceReflect[T](v) } -// AsFloat64Slice converts a float64 array into a slice into with same elements as array. -func AsFloat64Slice(v any) []float64 { - rv := reflect.ValueOf(v) - if rv.Type().Kind() != reflect.Array { - return nil - } - cpy := make([]float64, rv.Len()) - if len(cpy) > 0 { - _ = reflect.Copy(reflect.ValueOf(cpy), rv) - } - return cpy +func sliceValueReflect[T sliceElem](v []T) any { + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeFor[T]())).Elem() + reflect.Copy(cp, reflect.ValueOf(v)) + return cp.Interface() } -// AsStringSlice converts a string array into a slice into with same elements as array. 
-func AsStringSlice(v any) []string { +func asSliceReflect[T sliceElem](v any) []T { rv := reflect.ValueOf(v) - if rv.Type().Kind() != reflect.Array { + if !rv.IsValid() || rv.Kind() != reflect.Array || rv.Type().Elem() != reflect.TypeFor[T]() { return nil } - cpy := make([]string, rv.Len()) + cpy := make([]T, rv.Len()) if len(cpy) > 0 { _ = reflect.Copy(reflect.ValueOf(cpy), rv) } diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/attribute/kv.go b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/attribute/kv.go index 8c6928ca7..0cc368018 100644 --- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/attribute/kv.go +++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/attribute/kv.go @@ -15,7 +15,7 @@ type KeyValue struct { // Valid reports whether kv is a valid OpenTelemetry attribute. func (kv KeyValue) Valid() bool { - return kv.Key.Defined() && kv.Value.Type() != INVALID + return kv.Key.Defined() } // Bool creates a KeyValue with a BOOL Value type. diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/attribute/type_string.go b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/attribute/type_string.go index 24f1fa37d..6c04448d6 100644 --- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/attribute/type_string.go +++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/attribute/type_string.go @@ -8,7 +8,7 @@ func _() { // An "invalid array index" compiler error signifies that the constant values have changed. // Re-run the stringer command to generate them again. 
var x [1]struct{} - _ = x[INVALID-0] + _ = x[EMPTY-0] _ = x[BOOL-1] _ = x[INT64-2] _ = x[FLOAT64-3] @@ -19,9 +19,9 @@ func _() { _ = x[STRINGSLICE-8] } -const _Type_name = "INVALIDBOOLINT64FLOAT64STRINGBOOLSLICEINT64SLICEFLOAT64SLICESTRINGSLICE" +const _Type_name = "EMPTYBOOLINT64FLOAT64STRINGBOOLSLICEINT64SLICEFLOAT64SLICESTRINGSLICE" -var _Type_index = [...]uint8{0, 7, 11, 16, 23, 29, 38, 48, 60, 71} +var _Type_index = [...]uint8{0, 5, 9, 14, 21, 27, 36, 46, 58, 69} func (i Type) String() string { idx := int(i) - 0 diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/attribute/value.go b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/attribute/value.go index 5931e7129..db04b1326 100644 --- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/attribute/value.go +++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/attribute/value.go @@ -6,7 +6,6 @@ package attribute // import "go.opentelemetry.io/otel/attribute" import ( "encoding/json" "fmt" - "reflect" "strconv" attribute "go.opentelemetry.io/otel/attribute/internal" @@ -18,6 +17,8 @@ import ( type Type int // nolint: revive // redefines builtin Type. // Value represents the value part in key-value pairs. +// +// Note that the zero value is a valid empty value. type Value struct { vtype Type numeric uint64 @@ -26,8 +27,8 @@ type Value struct { } const ( - // INVALID is used for a Value with no value set. - INVALID Type = iota + // EMPTY is used for a Value with no value set. + EMPTY Type = iota // BOOL is a boolean Type Value. BOOL // INT64 is a 64-bit signed integral Type Value. @@ -44,6 +45,10 @@ const ( FLOAT64SLICE // STRINGSLICE is a slice of strings Type Value. STRINGSLICE + // INVALID is used for a Value with no value set. + // + // Deprecated: Use EMPTY instead as an empty value is a valid value. + INVALID = EMPTY ) // BoolValue creates a BOOL Value. @@ -56,7 +61,7 @@ func BoolValue(v bool) Value { // BoolSliceValue creates a BOOLSLICE Value. 
func BoolSliceValue(v []bool) Value { - return Value{vtype: BOOLSLICE, slice: attribute.BoolSliceValue(v)} + return Value{vtype: BOOLSLICE, slice: attribute.SliceValue(v)} } // IntValue creates an INT64 Value. @@ -64,16 +69,30 @@ func IntValue(v int) Value { return Int64Value(int64(v)) } -// IntSliceValue creates an INTSLICE Value. +// IntSliceValue creates an INT64SLICE Value. func IntSliceValue(v []int) Value { - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeFor[int64]())) - for i, val := range v { - cp.Elem().Index(i).SetInt(int64(val)) - } - return Value{ - vtype: INT64SLICE, - slice: cp.Elem().Interface(), + val := Value{vtype: INT64SLICE} + + // Avoid the common tiny-slice cases from allocating a new slice. + switch len(v) { + case 0: + val.slice = [0]int64{} + case 1: + val.slice = [1]int64{int64(v[0])} + case 2: + val.slice = [2]int64{int64(v[0]), int64(v[1])} + case 3: + val.slice = [3]int64{int64(v[0]), int64(v[1]), int64(v[2])} + default: + // Fallback to a new slice for larger slices. + cp := make([]int64, len(v)) + for i, val := range v { + cp[i] = int64(val) + } + val.slice = attribute.SliceValue(cp) } + + return val } // Int64Value creates an INT64 Value. @@ -86,7 +105,7 @@ func Int64Value(v int64) Value { // Int64SliceValue creates an INT64SLICE Value. func Int64SliceValue(v []int64) Value { - return Value{vtype: INT64SLICE, slice: attribute.Int64SliceValue(v)} + return Value{vtype: INT64SLICE, slice: attribute.SliceValue(v)} } // Float64Value creates a FLOAT64 Value. @@ -99,7 +118,7 @@ func Float64Value(v float64) Value { // Float64SliceValue creates a FLOAT64SLICE Value. func Float64SliceValue(v []float64) Value { - return Value{vtype: FLOAT64SLICE, slice: attribute.Float64SliceValue(v)} + return Value{vtype: FLOAT64SLICE, slice: attribute.SliceValue(v)} } // StringValue creates a STRING Value. @@ -112,7 +131,7 @@ func StringValue(v string) Value { // StringSliceValue creates a STRINGSLICE Value. 
func StringSliceValue(v []string) Value { - return Value{vtype: STRINGSLICE, slice: attribute.StringSliceValue(v)} + return Value{vtype: STRINGSLICE, slice: attribute.SliceValue(v)} } // Type returns a type of the Value. @@ -136,7 +155,7 @@ func (v Value) AsBoolSlice() []bool { } func (v Value) asBoolSlice() []bool { - return attribute.AsBoolSlice(v.slice) + return attribute.AsSlice[bool](v.slice) } // AsInt64 returns the int64 value. Make sure that the Value's type is @@ -155,7 +174,7 @@ func (v Value) AsInt64Slice() []int64 { } func (v Value) asInt64Slice() []int64 { - return attribute.AsInt64Slice(v.slice) + return attribute.AsSlice[int64](v.slice) } // AsFloat64 returns the float64 value. Make sure that the Value's @@ -174,7 +193,7 @@ func (v Value) AsFloat64Slice() []float64 { } func (v Value) asFloat64Slice() []float64 { - return attribute.AsFloat64Slice(v.slice) + return attribute.AsSlice[float64](v.slice) } // AsString returns the string value. Make sure that the Value's type @@ -193,7 +212,7 @@ func (v Value) AsStringSlice() []string { } func (v Value) asStringSlice() []string { - return attribute.AsStringSlice(v.slice) + return attribute.AsSlice[string](v.slice) } type unknownValueType struct{} @@ -217,6 +236,8 @@ func (v Value) AsInterface() any { return v.stringly case STRINGSLICE: return v.asStringSlice() + case EMPTY: + return nil } return unknownValueType{} } @@ -252,6 +273,8 @@ func (v Value) Emit() string { return string(j) case STRING: return v.stringly + case EMPTY: + return "" default: return "unknown" } diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile index f0cc942ba..7a9b3c055 100644 --- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile +++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile @@ -1,4 +1,4 @@ # This is a renovate-friendly source of 
Docker images. FROM python:3.13.6-slim-bullseye@sha256:e98b521460ee75bca92175c16247bdf7275637a8faaeb2bcfa19d879ae5c4b9a AS python -FROM otel/weaver:v0.21.2@sha256:2401de985c38bdb98b43918e2f43aa36b2afed4aa5669ac1c1de0a17301cd36d AS weaver +FROM otel/weaver:v0.22.1@sha256:33ae522ae4b71c1c562563c1d81f46aa0f79f088a0873199143a1f11ac30e5c9 AS weaver FROM avtodev/markdown-lint:v1@sha256:6aeedc2f49138ce7a1cd0adffc1b1c0321b841dc2102408967d9301c031949ee AS markdown diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/internal/x/features.go b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/internal/x/features.go index bfeb73e81..694b64a31 100644 --- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/internal/x/features.go +++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/internal/x/features.go @@ -37,3 +37,18 @@ var Observability = newFeature( return "", false }, ) + +// PerSeriesStartTimestamps is an experimental feature flag that determines if the SDK +// uses the new Start Timestamps specification. +// +// To enable this feature set the OTEL_GO_X_PER_SERIES_START_TIMESTAMPS environment variable +// to the case-insensitive string value of "true". 
+var PerSeriesStartTimestamps = newFeature( + []string{"PER_SERIES_START_TIMESTAMPS"}, + func(v string) (bool, bool) { + if strings.EqualFold(v, "true") { + return true, true + } + return false, false + }, +) diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go index 8a7bb330b..04f15fcd2 100644 --- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go +++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go @@ -13,7 +13,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" ) type ( diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/resource/config.go b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/resource/config.go index 0d6e213d9..a3d647d92 100644 --- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/resource/config.go +++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/resource/config.go @@ -193,3 +193,11 @@ func WithContainer() Option { func WithContainerID() Option { return WithDetectors(cgroupContainerIDDetector{}) } + +// WithService adds all the Service attributes to the configured Resource. 
+func WithService() Option { + return WithDetectors( + defaultServiceInstanceIDDetector{}, + defaultServiceNameDetector{}, + ) +} diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/resource/container.go b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/resource/container.go index a19b39def..e977ff1c4 100644 --- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/resource/container.go +++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/resource/container.go @@ -11,7 +11,7 @@ import ( "os" "regexp" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" ) type containerIDProvider func() (string, error) diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/resource/env.go index c49157224..bc0e5c19e 100644 --- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/resource/env.go +++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/resource/env.go @@ -12,7 +12,7 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" ) const ( diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go index 023621ba7..755c08242 100644 --- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go +++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go @@ -8,7 +8,7 @@ import ( "errors" "strings" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" ) type hostIDProvider func() (string, error) @@ -31,19 +31,19 @@ type hostIDReaderBSD struct { readFile fileReader } -// read attempts to read the 
machine-id from /etc/hostid. If not found it will -// execute `kenv -q smbios.system.uuid`. If neither location yields an id an -// error will be returned. +// read attempts to read the machine-id from /etc/hostid. +// If not found it will execute: /bin/kenv -q smbios.system.uuid. +// If neither location yields an id an error will be returned. func (r *hostIDReaderBSD) read() (string, error) { if result, err := r.readFile("/etc/hostid"); err == nil { return strings.TrimSpace(result), nil } - if result, err := r.execCommand("kenv", "-q", "smbios.system.uuid"); err == nil { + if result, err := r.execCommand("/bin/kenv", "-q", "smbios.system.uuid"); err == nil { return strings.TrimSpace(result), nil } - return "", errors.New("host id not found in: /etc/hostid or kenv") + return "", errors.New("host id not found in: /etc/hostid or /bin/kenv") } // hostIDReaderDarwin implements hostIDReader. diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go index 6354b3560..c95d87685 100644 --- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go +++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go @@ -8,7 +8,7 @@ package resource // import "go.opentelemetry.io/otel/sdk/resource" import "os" func readFile(filename string) (string, error) { - b, err := os.ReadFile(filename) + b, err := os.ReadFile(filename) // nolint:gosec // false positive if err != nil { return "", err } diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/resource/os.go index 534809e21..f5682cad4 100644 --- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/resource/os.go +++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/resource/os.go @@ -8,7 +8,7 @@ import ( 
"strings" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" ) type osDescriptionProvider func() (string, error) diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/resource/process.go index a1189553c..99dce64f6 100644 --- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/resource/process.go +++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/resource/process.go @@ -11,7 +11,7 @@ import ( "path/filepath" "runtime" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" ) type ( diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go index 28e1e4f7e..f715be53e 100644 --- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go +++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go @@ -232,6 +232,15 @@ func Empty() *Resource { // Default returns an instance of Resource with a default // "service.name" and OpenTelemetrySDK attributes. func Default() *Resource { + return DefaultWithContext(context.Background()) +} + +// DefaultWithContext returns an instance of Resource with a default +// "service.name" and OpenTelemetrySDK attributes. +// +// If the default resource has already been initialized, the provided ctx +// is ignored and the cached resource is returned. +func DefaultWithContext(ctx context.Context) *Resource { defaultResourceOnce.Do(func() { var err error defaultDetectors := []Detector{ @@ -243,7 +252,7 @@ func Default() *Resource { defaultDetectors = append([]Detector{defaultServiceInstanceIDDetector{}}, defaultDetectors...) 
} defaultResource, err = Detect( - context.Background(), + ctx, defaultDetectors..., ) if err != nil { @@ -260,8 +269,14 @@ func Default() *Resource { // Environment returns an instance of Resource with attributes // extracted from the OTEL_RESOURCE_ATTRIBUTES environment variable. func Environment() *Resource { + return EnvironmentWithContext(context.Background()) +} + +// EnvironmentWithContext returns an instance of Resource with attributes +// extracted from the OTEL_RESOURCE_ATTRIBUTES environment variable. +func EnvironmentWithContext(ctx context.Context) *Resource { detector := &fromEnv{} - resource, err := detector.Detect(context.Background()) + resource, err := detector.Detect(ctx) if err != nil { otel.Handle(err) } diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go index 7d15cbb9c..32854b14a 100644 --- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go +++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go @@ -68,7 +68,7 @@ type batchSpanProcessor struct { o BatchSpanProcessorOptions queue chan ReadOnlySpan - dropped uint32 + dropped atomic.Uint32 inst *observ.BSP @@ -123,12 +123,10 @@ func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorO otel.Handle(err) } - bsp.stopWait.Add(1) - go func() { - defer bsp.stopWait.Done() + bsp.stopWait.Go(func() { bsp.processQueue() bsp.drainQueue() - }() + }) return bsp } @@ -295,7 +293,7 @@ func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error { } if l := len(bsp.batch); l > 0 { - global.Debug("exporting spans", "count", len(bsp.batch), "total_dropped", atomic.LoadUint32(&bsp.dropped)) + global.Debug("exporting spans", "count", len(bsp.batch), "total_dropped", bsp.dropped.Load()) if bsp.inst != nil { bsp.inst.Processed(ctx, int64(l)) } @@ -423,7 
+421,7 @@ func (bsp *batchSpanProcessor) enqueueDrop(ctx context.Context, sd ReadOnlySpan) case bsp.queue <- sd: return true default: - atomic.AddUint32(&bsp.dropped, 1) + bsp.dropped.Add(1) if bsp.inst != nil { bsp.inst.ProcessedQueueFull(ctx, 1) } diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/batch_span_processor.go b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/batch_span_processor.go index d9cfba0b4..c31e03aa0 100644 --- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/batch_span_processor.go +++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/batch_span_processor.go @@ -13,8 +13,8 @@ import ( "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/sdk" "go.opentelemetry.io/otel/sdk/internal/x" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" - "go.opentelemetry.io/otel/semconv/v1.39.0/otelconv" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" + "go.opentelemetry.io/otel/semconv/v1.40.0/otelconv" ) const ( diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/simple_span_processor.go b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/simple_span_processor.go index 8afd05267..0e77cd953 100644 --- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/simple_span_processor.go +++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/simple_span_processor.go @@ -13,8 +13,8 @@ import ( "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/sdk" "go.opentelemetry.io/otel/sdk/internal/x" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" - "go.opentelemetry.io/otel/semconv/v1.39.0/otelconv" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" + "go.opentelemetry.io/otel/semconv/v1.40.0/otelconv" ) var measureAttrsPool = sync.Pool{ diff --git 
a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/tracer.go b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/tracer.go index 13a2db296..560d316f2 100644 --- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/tracer.go +++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/tracer.go @@ -13,7 +13,7 @@ import ( "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/sdk" "go.opentelemetry.io/otel/sdk/internal/x" - "go.opentelemetry.io/otel/semconv/v1.39.0/otelconv" + "go.opentelemetry.io/otel/semconv/v1.40.0/otelconv" "go.opentelemetry.io/otel/trace" ) @@ -55,6 +55,10 @@ func NewTracer() (Tracer, error) { func (t Tracer) Enabled() bool { return t.enabled } func (t Tracer) SpanStarted(ctx context.Context, psc trace.SpanContext, span trace.Span) { + if !t.started.Enabled(ctx) { + return + } + key := spanStartedKey{ parent: parentStateNoParent, sampling: samplingStateDrop, @@ -89,6 +93,10 @@ func (t Tracer) SpanEnded(ctx context.Context, span trace.Span) { } func (t Tracer) spanLive(ctx context.Context, value int64, span trace.Span) { + if !t.live.Enabled(ctx) { + return + } + key := spanLiveKey{sampled: span.SpanContext().IsSampled()} opts := spanLiveOpts[key] t.live.Add(ctx, value, opts...) 
diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go index d2cf4ebd3..cd40d299d 100644 --- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go +++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go @@ -5,6 +5,7 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "context" + "errors" "fmt" "sync" "sync/atomic" @@ -262,6 +263,7 @@ func (p *TracerProvider) ForceFlush(ctx context.Context) error { return nil } + var err error for _, sps := range spss { select { case <-ctx.Done(): @@ -269,11 +271,9 @@ func (p *TracerProvider) ForceFlush(ctx context.Context) error { default: } - if err := sps.sp.ForceFlush(ctx); err != nil { - return err - } + err = errors.Join(err, sps.sp.ForceFlush(ctx)) } - return nil + return err } // Shutdown shuts down TracerProvider. All registered span processors are shut down @@ -303,14 +303,7 @@ func (p *TracerProvider) Shutdown(ctx context.Context) error { sps.state.Do(func() { err = sps.sp.Shutdown(ctx) }) - if err != nil { - if retErr == nil { - retErr = err - } else { - // Poor man's list of errors - retErr = fmt.Errorf("%w; %w", retErr, err) - } - } + retErr = errors.Join(retErr, err) } p.spanProcessors.Store(&spanProcessorStates{}) return retErr diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go index 81c5060ad..845e292c2 100644 --- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go +++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go @@ -69,17 +69,17 @@ type traceIDRatioSampler struct { } func (ts traceIDRatioSampler) ShouldSample(p SamplingParameters) SamplingResult { - psc := trace.SpanContextFromContext(p.ParentContext) + state := 
trace.SpanContextFromContext(p.ParentContext).TraceState() x := binary.BigEndian.Uint64(p.TraceID[8:16]) >> 1 if x < ts.traceIDUpperBound { return SamplingResult{ Decision: RecordAndSample, - Tracestate: psc.TraceState(), + Tracestate: state, } } return SamplingResult{ Decision: Drop, - Tracestate: psc.TraceState(), + Tracestate: state, } } @@ -94,12 +94,20 @@ func (ts traceIDRatioSampler) Description() string { // //nolint:revive // revive complains about stutter of `trace.TraceIDRatioBased` func TraceIDRatioBased(fraction float64) Sampler { + // Cannot use AlwaysSample() and NeverSample(), must return spec-compliant descriptions. + // See https://opentelemetry.io/docs/specs/otel/trace/sdk/#traceidratiobased. if fraction >= 1 { - return AlwaysSample() + return predeterminedSampler{ + description: "TraceIDRatioBased{1}", + decision: RecordAndSample, + } } if fraction <= 0 { - fraction = 0 + return predeterminedSampler{ + description: "TraceIDRatioBased{0}", + decision: Drop, + } } return &traceIDRatioSampler{ @@ -118,6 +126,7 @@ func (alwaysOnSampler) ShouldSample(p SamplingParameters) SamplingResult { } func (alwaysOnSampler) Description() string { + // https://opentelemetry.io/docs/specs/otel/trace/sdk/#alwayson return "AlwaysOnSampler" } @@ -139,6 +148,7 @@ func (alwaysOffSampler) ShouldSample(p SamplingParameters) SamplingResult { } func (alwaysOffSampler) Description() string { + // https://opentelemetry.io/docs/specs/otel/trace/sdk/#alwaysoff return "AlwaysOffSampler" } @@ -147,6 +157,22 @@ func NeverSample() Sampler { return alwaysOffSampler{} } +type predeterminedSampler struct { + description string + decision SamplingDecision +} + +func (s predeterminedSampler) ShouldSample(p SamplingParameters) SamplingResult { + return SamplingResult{ + Decision: s.decision, + Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(), + } +} + +func (s predeterminedSampler) Description() string { + return s.description +} + // ParentBased returns a sampler 
decorator which behaves differently, // based on the parent of the span. If the span has no parent, // the decorated sampler is used to make sampling decision. If the span has diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/trace/span.go index d46661059..7d55ce1dc 100644 --- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/trace/span.go +++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/trace/span.go @@ -20,7 +20,7 @@ import ( "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/resource" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" ) diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/version.go b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/version.go index b5497c281..766731dd2 100644 --- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/version.go +++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/sdk/version.go @@ -6,5 +6,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk" // Version is the current release version of the OpenTelemetry SDK in use. func Version() string { - return "1.40.0" + return "1.43.0" } diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/otelconv/metric.go b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/otelconv/metric.go new file mode 100644 index 000000000..901da8698 --- /dev/null +++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/otelconv/metric.go @@ -0,0 +1,2222 @@ +// Code generated from semantic convention specification. DO NOT EDIT. 
+ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package otelconv provides types and functionality for OpenTelemetry semantic +// conventions in the "otel" namespace. +package otelconv + +import ( + "context" + "sync" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" +) + +var ( + addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }} + recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }} +) + +// ErrorTypeAttr is an attribute conforming to the error.type semantic +// conventions. It represents the describes a class of error the operation ended +// with. +type ErrorTypeAttr string + +var ( + // ErrorTypeOther is a fallback error value to be used when the instrumentation + // doesn't define a custom value. + ErrorTypeOther ErrorTypeAttr = "_OTHER" +) + +// ComponentTypeAttr is an attribute conforming to the otel.component.type +// semantic conventions. It represents a name identifying the type of the +// OpenTelemetry component. +type ComponentTypeAttr string + +var ( + // ComponentTypeBatchingSpanProcessor is the builtin SDK batching span + // processor. + ComponentTypeBatchingSpanProcessor ComponentTypeAttr = "batching_span_processor" + // ComponentTypeSimpleSpanProcessor is the builtin SDK simple span processor. + ComponentTypeSimpleSpanProcessor ComponentTypeAttr = "simple_span_processor" + // ComponentTypeBatchingLogProcessor is the builtin SDK batching log record + // processor. + ComponentTypeBatchingLogProcessor ComponentTypeAttr = "batching_log_processor" + // ComponentTypeSimpleLogProcessor is the builtin SDK simple log record + // processor. + ComponentTypeSimpleLogProcessor ComponentTypeAttr = "simple_log_processor" + // ComponentTypeOtlpGRPCSpanExporter is the OTLP span exporter over gRPC with + // protobuf serialization. 
+ ComponentTypeOtlpGRPCSpanExporter ComponentTypeAttr = "otlp_grpc_span_exporter" + // ComponentTypeOtlpHTTPSpanExporter is the OTLP span exporter over HTTP with + // protobuf serialization. + ComponentTypeOtlpHTTPSpanExporter ComponentTypeAttr = "otlp_http_span_exporter" + // ComponentTypeOtlpHTTPJSONSpanExporter is the OTLP span exporter over HTTP + // with JSON serialization. + ComponentTypeOtlpHTTPJSONSpanExporter ComponentTypeAttr = "otlp_http_json_span_exporter" + // ComponentTypeZipkinHTTPSpanExporter is the zipkin span exporter over HTTP. + ComponentTypeZipkinHTTPSpanExporter ComponentTypeAttr = "zipkin_http_span_exporter" + // ComponentTypeOtlpGRPCLogExporter is the OTLP log record exporter over gRPC + // with protobuf serialization. + ComponentTypeOtlpGRPCLogExporter ComponentTypeAttr = "otlp_grpc_log_exporter" + // ComponentTypeOtlpHTTPLogExporter is the OTLP log record exporter over HTTP + // with protobuf serialization. + ComponentTypeOtlpHTTPLogExporter ComponentTypeAttr = "otlp_http_log_exporter" + // ComponentTypeOtlpHTTPJSONLogExporter is the OTLP log record exporter over + // HTTP with JSON serialization. + ComponentTypeOtlpHTTPJSONLogExporter ComponentTypeAttr = "otlp_http_json_log_exporter" + // ComponentTypePeriodicMetricReader is the builtin SDK periodically exporting + // metric reader. + ComponentTypePeriodicMetricReader ComponentTypeAttr = "periodic_metric_reader" + // ComponentTypeOtlpGRPCMetricExporter is the OTLP metric exporter over gRPC + // with protobuf serialization. + ComponentTypeOtlpGRPCMetricExporter ComponentTypeAttr = "otlp_grpc_metric_exporter" + // ComponentTypeOtlpHTTPMetricExporter is the OTLP metric exporter over HTTP + // with protobuf serialization. + ComponentTypeOtlpHTTPMetricExporter ComponentTypeAttr = "otlp_http_metric_exporter" + // ComponentTypeOtlpHTTPJSONMetricExporter is the OTLP metric exporter over HTTP + // with JSON serialization. 
+ ComponentTypeOtlpHTTPJSONMetricExporter ComponentTypeAttr = "otlp_http_json_metric_exporter" + // ComponentTypePrometheusHTTPTextMetricExporter is the prometheus metric + // exporter over HTTP with the default text-based format. + ComponentTypePrometheusHTTPTextMetricExporter ComponentTypeAttr = "prometheus_http_text_metric_exporter" +) + +// SpanParentOriginAttr is an attribute conforming to the otel.span.parent.origin +// semantic conventions. It represents the determines whether the span has a +// parent span, and if so, [whether it is a remote parent]. +// +// [whether it is a remote parent]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote +type SpanParentOriginAttr string + +var ( + // SpanParentOriginNone is the span does not have a parent, it is a root span. + SpanParentOriginNone SpanParentOriginAttr = "none" + // SpanParentOriginLocal is the span has a parent and the parent's span context + // [isRemote()] is false. + // + // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote + SpanParentOriginLocal SpanParentOriginAttr = "local" + // SpanParentOriginRemote is the span has a parent and the parent's span context + // [isRemote()] is true. + // + // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote + SpanParentOriginRemote SpanParentOriginAttr = "remote" +) + +// SpanSamplingResultAttr is an attribute conforming to the +// otel.span.sampling_result semantic conventions. It represents the result value +// of the sampler for this span. +type SpanSamplingResultAttr string + +var ( + // SpanSamplingResultDrop is the span is not sampled and not recording. + SpanSamplingResultDrop SpanSamplingResultAttr = "DROP" + // SpanSamplingResultRecordOnly is the span is not sampled, but recording. + SpanSamplingResultRecordOnly SpanSamplingResultAttr = "RECORD_ONLY" + // SpanSamplingResultRecordAndSample is the span is sampled and recording. 
+ SpanSamplingResultRecordAndSample SpanSamplingResultAttr = "RECORD_AND_SAMPLE" +) + +// SDKExporterLogExported is an instrument used to record metric values +// conforming to the "otel.sdk.exporter.log.exported" semantic conventions. It +// represents the number of log records for which the export has finished, either +// successful or failed. +type SDKExporterLogExported struct { + metric.Int64Counter +} + +var newSDKExporterLogExportedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of log records for which the export has finished, either successful or failed."), + metric.WithUnit("{log_record}"), +} + +// NewSDKExporterLogExported returns a new SDKExporterLogExported instrument. +func NewSDKExporterLogExported( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKExporterLogExported, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterLogExported{noop.Int64Counter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKExporterLogExportedOpts + } else { + opt = append(opt, newSDKExporterLogExportedOpts...) + } + + i, err := m.Int64Counter( + "otel.sdk.exporter.log.exported", + opt..., + ) + if err != nil { + return SDKExporterLogExported{noop.Int64Counter{}}, err + } + return SDKExporterLogExported{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterLogExported) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (SDKExporterLogExported) Name() string { + return "otel.sdk.exporter.log.exported" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterLogExported) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterLogExported) Description() string { + return "The number of log records for which the export has finished, either successful or failed." 
+} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with +// `rejected_log_records`), rejected log records MUST count as failed and only +// non-rejected log records count as success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. +func (m SDKExporterLogExported) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with +// `rejected_log_records`), rejected log records MUST count as failed and only +// non-rejected log records count as success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. +func (m SDKExporterLogExported) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. 
+func (SDKExporterLogExported) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKExporterLogExported) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterLogExported) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterLogExported) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterLogExported) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterLogInflight is an instrument used to record metric values +// conforming to the "otel.sdk.exporter.log.inflight" semantic conventions. It +// represents the number of log records which were passed to the exporter, but +// that have not been exported yet (neither successful, nor failed). 
+type SDKExporterLogInflight struct { + metric.Int64UpDownCounter +} + +var newSDKExporterLogInflightOpts = []metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of log records which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."), + metric.WithUnit("{log_record}"), +} + +// NewSDKExporterLogInflight returns a new SDKExporterLogInflight instrument. +func NewSDKExporterLogInflight( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (SDKExporterLogInflight, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterLogInflight{noop.Int64UpDownCounter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKExporterLogInflightOpts + } else { + opt = append(opt, newSDKExporterLogInflightOpts...) + } + + i, err := m.Int64UpDownCounter( + "otel.sdk.exporter.log.inflight", + opt..., + ) + if err != nil { + return SDKExporterLogInflight{noop.Int64UpDownCounter{}}, err + } + return SDKExporterLogInflight{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterLogInflight) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKExporterLogInflight) Name() string { + return "otel.sdk.exporter.log.inflight" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterLogInflight) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterLogInflight) Description() string { + return "The number of log records which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. 
For failed exports, +// `error.type` MUST contain the failure cause. +func (m SDKExporterLogInflight) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +func (m SDKExporterLogInflight) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKExporterLogInflight) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterLogInflight) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. 
+func (SDKExporterLogInflight) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterLogInflight) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterMetricDataPointExported is an instrument used to record metric +// values conforming to the "otel.sdk.exporter.metric_data_point.exported" +// semantic conventions. It represents the number of metric data points for which +// the export has finished, either successful or failed. +type SDKExporterMetricDataPointExported struct { + metric.Int64Counter +} + +var newSDKExporterMetricDataPointExportedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of metric data points for which the export has finished, either successful or failed."), + metric.WithUnit("{data_point}"), +} + +// NewSDKExporterMetricDataPointExported returns a new +// SDKExporterMetricDataPointExported instrument. +func NewSDKExporterMetricDataPointExported( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKExporterMetricDataPointExported, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterMetricDataPointExported{noop.Int64Counter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKExporterMetricDataPointExportedOpts + } else { + opt = append(opt, newSDKExporterMetricDataPointExportedOpts...) + } + + i, err := m.Int64Counter( + "otel.sdk.exporter.metric_data_point.exported", + opt..., + ) + if err != nil { + return SDKExporterMetricDataPointExported{noop.Int64Counter{}}, err + } + return SDKExporterMetricDataPointExported{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m SDKExporterMetricDataPointExported) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (SDKExporterMetricDataPointExported) Name() string { + return "otel.sdk.exporter.metric_data_point.exported" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterMetricDataPointExported) Unit() string { + return "{data_point}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterMetricDataPointExported) Description() string { + return "The number of metric data points for which the export has finished, either successful or failed." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with +// `rejected_data_points`), rejected data points MUST count as failed and only +// non-rejected data points count as success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. +func (m SDKExporterMetricDataPointExported) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. 
OTLP with +// `rejected_data_points`), rejected data points MUST count as failed and only +// non-rejected data points count as success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. +func (m SDKExporterMetricDataPointExported) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (SDKExporterMetricDataPointExported) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKExporterMetricDataPointExported) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterMetricDataPointExported) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. 
+func (SDKExporterMetricDataPointExported) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterMetricDataPointExported) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterMetricDataPointInflight is an instrument used to record metric +// values conforming to the "otel.sdk.exporter.metric_data_point.inflight" +// semantic conventions. It represents the number of metric data points which +// were passed to the exporter, but that have not been exported yet (neither +// successful, nor failed). +type SDKExporterMetricDataPointInflight struct { + metric.Int64UpDownCounter +} + +var newSDKExporterMetricDataPointInflightOpts = []metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of metric data points which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."), + metric.WithUnit("{data_point}"), +} + +// NewSDKExporterMetricDataPointInflight returns a new +// SDKExporterMetricDataPointInflight instrument. +func NewSDKExporterMetricDataPointInflight( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (SDKExporterMetricDataPointInflight, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterMetricDataPointInflight{noop.Int64UpDownCounter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKExporterMetricDataPointInflightOpts + } else { + opt = append(opt, newSDKExporterMetricDataPointInflightOpts...) + } + + i, err := m.Int64UpDownCounter( + "otel.sdk.exporter.metric_data_point.inflight", + opt..., + ) + if err != nil { + return SDKExporterMetricDataPointInflight{noop.Int64UpDownCounter{}}, err + } + return SDKExporterMetricDataPointInflight{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m SDKExporterMetricDataPointInflight) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKExporterMetricDataPointInflight) Name() string { + return "otel.sdk.exporter.metric_data_point.inflight" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterMetricDataPointInflight) Unit() string { + return "{data_point}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterMetricDataPointInflight) Description() string { + return "The number of metric data points which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +func (m SDKExporterMetricDataPointInflight) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +func (m SDKExporterMetricDataPointInflight) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) 
+} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKExporterMetricDataPointInflight) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterMetricDataPointInflight) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterMetricDataPointInflight) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterMetricDataPointInflight) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterOperationDuration is an instrument used to record metric values +// conforming to the "otel.sdk.exporter.operation.duration" semantic conventions. +// It represents the duration of exporting a batch of telemetry records. +type SDKExporterOperationDuration struct { + metric.Float64Histogram +} + +var newSDKExporterOperationDurationOpts = []metric.Float64HistogramOption{ + metric.WithDescription("The duration of exporting a batch of telemetry records."), + metric.WithUnit("s"), +} + +// NewSDKExporterOperationDuration returns a new SDKExporterOperationDuration +// instrument. 
+func NewSDKExporterOperationDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (SDKExporterOperationDuration, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterOperationDuration{noop.Float64Histogram{}}, nil + } + + if len(opt) == 0 { + opt = newSDKExporterOperationDurationOpts + } else { + opt = append(opt, newSDKExporterOperationDurationOpts...) + } + + i, err := m.Float64Histogram( + "otel.sdk.exporter.operation.duration", + opt..., + ) + if err != nil { + return SDKExporterOperationDuration{noop.Float64Histogram{}}, err + } + return SDKExporterOperationDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterOperationDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (SDKExporterOperationDuration) Name() string { + return "otel.sdk.exporter.operation.duration" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterOperationDuration) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterOperationDuration) Description() string { + return "The duration of exporting a batch of telemetry records." +} + +// Record records val to the current distribution for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// This metric defines successful operations using the full success definitions +// for [http] +// and [grpc]. Anything else is defined as an unsuccessful operation. For +// successful +// operations, `error.type` MUST NOT be set. For unsuccessful export operations, +// `error.type` MUST contain a relevant failure cause. 
+// +// [http]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success-1 +// [grpc]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success +func (m SDKExporterOperationDuration) Record( + ctx context.Context, + val float64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Float64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// This metric defines successful operations using the full success definitions +// for [http] +// and [grpc]. Anything else is defined as an unsuccessful operation. For +// successful +// operations, `error.type` MUST NOT be set. For unsuccessful export operations, +// `error.type` MUST contain a relevant failure cause. +// +// [http]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success-1 +// [grpc]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success +func (m SDKExporterOperationDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. 
+func (SDKExporterOperationDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrHTTPResponseStatusCode returns an optional attribute for the +// "http.response.status_code" semantic convention. It represents the HTTP status +// code of the last HTTP request performed in scope of this export call. +func (SDKExporterOperationDuration) AttrHTTPResponseStatusCode(val int) attribute.KeyValue { + return attribute.Int("http.response.status_code", val) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKExporterOperationDuration) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterOperationDuration) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrRPCResponseStatusCode returns an optional attribute for the +// "rpc.response.status_code" semantic convention. It represents the gRPC status +// code of the last gRPC request performed in scope of this export call. +func (SDKExporterOperationDuration) AttrRPCResponseStatusCode(val string) attribute.KeyValue { + return attribute.String("rpc.response.status_code", val) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. 
+func (SDKExporterOperationDuration) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterOperationDuration) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterSpanExported is an instrument used to record metric values +// conforming to the "otel.sdk.exporter.span.exported" semantic conventions. It +// represents the number of spans for which the export has finished, either +// successful or failed. +type SDKExporterSpanExported struct { + metric.Int64Counter +} + +var newSDKExporterSpanExportedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of spans for which the export has finished, either successful or failed."), + metric.WithUnit("{span}"), +} + +// NewSDKExporterSpanExported returns a new SDKExporterSpanExported instrument. +func NewSDKExporterSpanExported( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKExporterSpanExported, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterSpanExported{noop.Int64Counter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKExporterSpanExportedOpts + } else { + opt = append(opt, newSDKExporterSpanExportedOpts...) + } + + i, err := m.Int64Counter( + "otel.sdk.exporter.span.exported", + opt..., + ) + if err != nil { + return SDKExporterSpanExported{noop.Int64Counter{}}, err + } + return SDKExporterSpanExported{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterSpanExported) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. 
+func (SDKExporterSpanExported) Name() string { + return "otel.sdk.exporter.span.exported" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterSpanExported) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterSpanExported) Description() string { + return "The number of spans for which the export has finished, either successful or failed." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with `rejected_spans` +// ), rejected spans MUST count as failed and only non-rejected spans count as +// success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. +func (m SDKExporterSpanExported) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with `rejected_spans` +// ), rejected spans MUST count as failed and only non-rejected spans count as +// success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. 
+func (m SDKExporterSpanExported) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (SDKExporterSpanExported) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKExporterSpanExported) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterSpanExported) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterSpanExported) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. 
+func (SDKExporterSpanExported) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterSpanInflight is an instrument used to record metric values +// conforming to the "otel.sdk.exporter.span.inflight" semantic conventions. It +// represents the number of spans which were passed to the exporter, but that +// have not been exported yet (neither successful, nor failed). +type SDKExporterSpanInflight struct { + metric.Int64UpDownCounter +} + +var newSDKExporterSpanInflightOpts = []metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."), + metric.WithUnit("{span}"), +} + +// NewSDKExporterSpanInflight returns a new SDKExporterSpanInflight instrument. +func NewSDKExporterSpanInflight( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (SDKExporterSpanInflight, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterSpanInflight{noop.Int64UpDownCounter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKExporterSpanInflightOpts + } else { + opt = append(opt, newSDKExporterSpanInflightOpts...) + } + + i, err := m.Int64UpDownCounter( + "otel.sdk.exporter.span.inflight", + opt..., + ) + if err != nil { + return SDKExporterSpanInflight{noop.Int64UpDownCounter{}}, err + } + return SDKExporterSpanInflight{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterSpanInflight) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (SDKExporterSpanInflight) Name() string { + return "otel.sdk.exporter.span.inflight" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterSpanInflight) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterSpanInflight) Description() string { + return "The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +func (m SDKExporterSpanInflight) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +func (m SDKExporterSpanInflight) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. 
+func (SDKExporterSpanInflight) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterSpanInflight) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterSpanInflight) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterSpanInflight) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKLogCreated is an instrument used to record metric values conforming to the +// "otel.sdk.log.created" semantic conventions. It represents the number of logs +// submitted to enabled SDK Loggers. +type SDKLogCreated struct { + metric.Int64Counter +} + +var newSDKLogCreatedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of logs submitted to enabled SDK Loggers."), + metric.WithUnit("{log_record}"), +} + +// NewSDKLogCreated returns a new SDKLogCreated instrument. +func NewSDKLogCreated( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKLogCreated, error) { + // Check if the meter is nil. + if m == nil { + return SDKLogCreated{noop.Int64Counter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKLogCreatedOpts + } else { + opt = append(opt, newSDKLogCreatedOpts...) 
+ } + + i, err := m.Int64Counter( + "otel.sdk.log.created", + opt..., + ) + if err != nil { + return SDKLogCreated{noop.Int64Counter{}}, err + } + return SDKLogCreated{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKLogCreated) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (SDKLogCreated) Name() string { + return "otel.sdk.log.created" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKLogCreated) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKLogCreated) Description() string { + return "The number of logs submitted to enabled SDK Loggers." +} + +// Add adds incr to the existing count for attrs. +func (m SDKLogCreated) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m SDKLogCreated) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// SDKMetricReaderCollectionDuration is an instrument used to record metric +// values conforming to the "otel.sdk.metric_reader.collection.duration" semantic +// conventions. It represents the duration of the collect operation of the metric +// reader. 
+type SDKMetricReaderCollectionDuration struct { + metric.Float64Histogram +} + +var newSDKMetricReaderCollectionDurationOpts = []metric.Float64HistogramOption{ + metric.WithDescription("The duration of the collect operation of the metric reader."), + metric.WithUnit("s"), +} + +// NewSDKMetricReaderCollectionDuration returns a new +// SDKMetricReaderCollectionDuration instrument. +func NewSDKMetricReaderCollectionDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (SDKMetricReaderCollectionDuration, error) { + // Check if the meter is nil. + if m == nil { + return SDKMetricReaderCollectionDuration{noop.Float64Histogram{}}, nil + } + + if len(opt) == 0 { + opt = newSDKMetricReaderCollectionDurationOpts + } else { + opt = append(opt, newSDKMetricReaderCollectionDurationOpts...) + } + + i, err := m.Float64Histogram( + "otel.sdk.metric_reader.collection.duration", + opt..., + ) + if err != nil { + return SDKMetricReaderCollectionDuration{noop.Float64Histogram{}}, err + } + return SDKMetricReaderCollectionDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKMetricReaderCollectionDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (SDKMetricReaderCollectionDuration) Name() string { + return "otel.sdk.metric_reader.collection.duration" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKMetricReaderCollectionDuration) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (SDKMetricReaderCollectionDuration) Description() string { + return "The duration of the collect operation of the metric reader." +} + +// Record records val to the current distribution for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful collections, `error.type` MUST NOT be set. 
For failed +// collections, `error.type` SHOULD contain the failure cause. +// It can happen that metrics collection is successful for some MetricProducers, +// while others fail. In that case `error.type` SHOULD be set to any of the +// failure causes. +func (m SDKMetricReaderCollectionDuration) Record( + ctx context.Context, + val float64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Float64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// For successful collections, `error.type` MUST NOT be set. For failed +// collections, `error.type` SHOULD contain the failure cause. +// It can happen that metrics collection is successful for some MetricProducers, +// while others fail. In that case `error.type` SHOULD be set to any of the +// failure causes. +func (m SDKMetricReaderCollectionDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (SDKMetricReaderCollectionDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. 
It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKMetricReaderCollectionDuration) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKMetricReaderCollectionDuration) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorLogProcessed is an instrument used to record metric values +// conforming to the "otel.sdk.processor.log.processed" semantic conventions. It +// represents the number of log records for which the processing has finished, +// either successful or failed. +type SDKProcessorLogProcessed struct { + metric.Int64Counter +} + +var newSDKProcessorLogProcessedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of log records for which the processing has finished, either successful or failed."), + metric.WithUnit("{log_record}"), +} + +// NewSDKProcessorLogProcessed returns a new SDKProcessorLogProcessed instrument. +func NewSDKProcessorLogProcessed( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKProcessorLogProcessed, error) { + // Check if the meter is nil. + if m == nil { + return SDKProcessorLogProcessed{noop.Int64Counter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKProcessorLogProcessedOpts + } else { + opt = append(opt, newSDKProcessorLogProcessedOpts...) + } + + i, err := m.Int64Counter( + "otel.sdk.processor.log.processed", + opt..., + ) + if err != nil { + return SDKProcessorLogProcessed{noop.Int64Counter{}}, err + } + return SDKProcessorLogProcessed{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m SDKProcessorLogProcessed) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (SDKProcessorLogProcessed) Name() string { + return "otel.sdk.processor.log.processed" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKProcessorLogProcessed) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKProcessorLogProcessed) Description() string { + return "The number of log records for which the processing has finished, either successful or failed." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful processing, `error.type` MUST NOT be set. For failed +// processing, `error.type` MUST contain the failure cause. +// For the SDK Simple and Batching Log Record Processor a log record is +// considered to be processed already when it has been submitted to the exporter, +// not when the corresponding export call has finished. +func (m SDKProcessorLogProcessed) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful processing, `error.type` MUST NOT be set. For failed +// processing, `error.type` MUST contain the failure cause. +// For the SDK Simple and Batching Log Record Processor a log record is +// considered to be processed already when it has been submitted to the exporter, +// not when the corresponding export call has finished. 
+func (m SDKProcessorLogProcessed) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents a low-cardinality description of the failure reason. +// SDK Batching Log Record Processors MUST use `queue_full` for log records +// dropped due to a full queue. +func (SDKProcessorLogProcessed) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKProcessorLogProcessed) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKProcessorLogProcessed) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorLogQueueCapacity is an instrument used to record metric values +// conforming to the "otel.sdk.processor.log.queue.capacity" semantic +// conventions. It represents the maximum number of log records the queue of a +// given instance of an SDK Log Record processor can hold. 
+type SDKProcessorLogQueueCapacity struct { + metric.Int64ObservableUpDownCounter +} + +var newSDKProcessorLogQueueCapacityOpts = []metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("The maximum number of log records the queue of a given instance of an SDK Log Record processor can hold."), + metric.WithUnit("{log_record}"), +} + +// NewSDKProcessorLogQueueCapacity returns a new SDKProcessorLogQueueCapacity +// instrument. +func NewSDKProcessorLogQueueCapacity( + m metric.Meter, + opt ...metric.Int64ObservableUpDownCounterOption, +) (SDKProcessorLogQueueCapacity, error) { + // Check if the meter is nil. + if m == nil { + return SDKProcessorLogQueueCapacity{noop.Int64ObservableUpDownCounter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKProcessorLogQueueCapacityOpts + } else { + opt = append(opt, newSDKProcessorLogQueueCapacityOpts...) + } + + i, err := m.Int64ObservableUpDownCounter( + "otel.sdk.processor.log.queue.capacity", + opt..., + ) + if err != nil { + return SDKProcessorLogQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err + } + return SDKProcessorLogQueueCapacity{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKProcessorLogQueueCapacity) Inst() metric.Int64ObservableUpDownCounter { + return m.Int64ObservableUpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKProcessorLogQueueCapacity) Name() string { + return "otel.sdk.processor.log.queue.capacity" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKProcessorLogQueueCapacity) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKProcessorLogQueueCapacity) Description() string { + return "The maximum number of log records the queue of a given instance of an SDK Log Record processor can hold." +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. 
It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKProcessorLogQueueCapacity) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKProcessorLogQueueCapacity) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorLogQueueSize is an instrument used to record metric values +// conforming to the "otel.sdk.processor.log.queue.size" semantic conventions. It +// represents the number of log records in the queue of a given instance of an +// SDK log processor. +type SDKProcessorLogQueueSize struct { + metric.Int64ObservableUpDownCounter +} + +var newSDKProcessorLogQueueSizeOpts = []metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("The number of log records in the queue of a given instance of an SDK log processor."), + metric.WithUnit("{log_record}"), +} + +// NewSDKProcessorLogQueueSize returns a new SDKProcessorLogQueueSize instrument. +func NewSDKProcessorLogQueueSize( + m metric.Meter, + opt ...metric.Int64ObservableUpDownCounterOption, +) (SDKProcessorLogQueueSize, error) { + // Check if the meter is nil. + if m == nil { + return SDKProcessorLogQueueSize{noop.Int64ObservableUpDownCounter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKProcessorLogQueueSizeOpts + } else { + opt = append(opt, newSDKProcessorLogQueueSizeOpts...) + } + + i, err := m.Int64ObservableUpDownCounter( + "otel.sdk.processor.log.queue.size", + opt..., + ) + if err != nil { + return SDKProcessorLogQueueSize{noop.Int64ObservableUpDownCounter{}}, err + } + return SDKProcessorLogQueueSize{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m SDKProcessorLogQueueSize) Inst() metric.Int64ObservableUpDownCounter { + return m.Int64ObservableUpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKProcessorLogQueueSize) Name() string { + return "otel.sdk.processor.log.queue.size" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKProcessorLogQueueSize) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKProcessorLogQueueSize) Description() string { + return "The number of log records in the queue of a given instance of an SDK log processor." +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKProcessorLogQueueSize) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKProcessorLogQueueSize) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorSpanProcessed is an instrument used to record metric values +// conforming to the "otel.sdk.processor.span.processed" semantic conventions. It +// represents the number of spans for which the processing has finished, either +// successful or failed. 
+type SDKProcessorSpanProcessed struct { + metric.Int64Counter +} + +var newSDKProcessorSpanProcessedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of spans for which the processing has finished, either successful or failed."), + metric.WithUnit("{span}"), +} + +// NewSDKProcessorSpanProcessed returns a new SDKProcessorSpanProcessed +// instrument. +func NewSDKProcessorSpanProcessed( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKProcessorSpanProcessed, error) { + // Check if the meter is nil. + if m == nil { + return SDKProcessorSpanProcessed{noop.Int64Counter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKProcessorSpanProcessedOpts + } else { + opt = append(opt, newSDKProcessorSpanProcessedOpts...) + } + + i, err := m.Int64Counter( + "otel.sdk.processor.span.processed", + opt..., + ) + if err != nil { + return SDKProcessorSpanProcessed{noop.Int64Counter{}}, err + } + return SDKProcessorSpanProcessed{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKProcessorSpanProcessed) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (SDKProcessorSpanProcessed) Name() string { + return "otel.sdk.processor.span.processed" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKProcessorSpanProcessed) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKProcessorSpanProcessed) Description() string { + return "The number of spans for which the processing has finished, either successful or failed." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful processing, `error.type` MUST NOT be set. For failed +// processing, `error.type` MUST contain the failure cause. 
+// For the SDK Simple and Batching Span Processor a span is considered to be +// processed already when it has been submitted to the exporter, not when the +// corresponding export call has finished. +func (m SDKProcessorSpanProcessed) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful processing, `error.type` MUST NOT be set. For failed +// processing, `error.type` MUST contain the failure cause. +// For the SDK Simple and Batching Span Processor a span is considered to be +// processed already when it has been submitted to the exporter, not when the +// corresponding export call has finished. +func (m SDKProcessorSpanProcessed) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents a low-cardinality description of the failure reason. +// SDK Batching Span Processors MUST use `queue_full` for spans dropped due to a +// full queue. +func (SDKProcessorSpanProcessed) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. 
+func (SDKProcessorSpanProcessed) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKProcessorSpanProcessed) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorSpanQueueCapacity is an instrument used to record metric values +// conforming to the "otel.sdk.processor.span.queue.capacity" semantic +// conventions. It represents the maximum number of spans the queue of a given +// instance of an SDK span processor can hold. +type SDKProcessorSpanQueueCapacity struct { + metric.Int64ObservableUpDownCounter +} + +var newSDKProcessorSpanQueueCapacityOpts = []metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("The maximum number of spans the queue of a given instance of an SDK span processor can hold."), + metric.WithUnit("{span}"), +} + +// NewSDKProcessorSpanQueueCapacity returns a new SDKProcessorSpanQueueCapacity +// instrument. +func NewSDKProcessorSpanQueueCapacity( + m metric.Meter, + opt ...metric.Int64ObservableUpDownCounterOption, +) (SDKProcessorSpanQueueCapacity, error) { + // Check if the meter is nil. + if m == nil { + return SDKProcessorSpanQueueCapacity{noop.Int64ObservableUpDownCounter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKProcessorSpanQueueCapacityOpts + } else { + opt = append(opt, newSDKProcessorSpanQueueCapacityOpts...) + } + + i, err := m.Int64ObservableUpDownCounter( + "otel.sdk.processor.span.queue.capacity", + opt..., + ) + if err != nil { + return SDKProcessorSpanQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err + } + return SDKProcessorSpanQueueCapacity{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m SDKProcessorSpanQueueCapacity) Inst() metric.Int64ObservableUpDownCounter { + return m.Int64ObservableUpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKProcessorSpanQueueCapacity) Name() string { + return "otel.sdk.processor.span.queue.capacity" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKProcessorSpanQueueCapacity) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKProcessorSpanQueueCapacity) Description() string { + return "The maximum number of spans the queue of a given instance of an SDK span processor can hold." +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKProcessorSpanQueueCapacity) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKProcessorSpanQueueCapacity) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorSpanQueueSize is an instrument used to record metric values +// conforming to the "otel.sdk.processor.span.queue.size" semantic conventions. +// It represents the number of spans in the queue of a given instance of an SDK +// span processor. 
+type SDKProcessorSpanQueueSize struct { + metric.Int64ObservableUpDownCounter +} + +var newSDKProcessorSpanQueueSizeOpts = []metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("The number of spans in the queue of a given instance of an SDK span processor."), + metric.WithUnit("{span}"), +} + +// NewSDKProcessorSpanQueueSize returns a new SDKProcessorSpanQueueSize +// instrument. +func NewSDKProcessorSpanQueueSize( + m metric.Meter, + opt ...metric.Int64ObservableUpDownCounterOption, +) (SDKProcessorSpanQueueSize, error) { + // Check if the meter is nil. + if m == nil { + return SDKProcessorSpanQueueSize{noop.Int64ObservableUpDownCounter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKProcessorSpanQueueSizeOpts + } else { + opt = append(opt, newSDKProcessorSpanQueueSizeOpts...) + } + + i, err := m.Int64ObservableUpDownCounter( + "otel.sdk.processor.span.queue.size", + opt..., + ) + if err != nil { + return SDKProcessorSpanQueueSize{noop.Int64ObservableUpDownCounter{}}, err + } + return SDKProcessorSpanQueueSize{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKProcessorSpanQueueSize) Inst() metric.Int64ObservableUpDownCounter { + return m.Int64ObservableUpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKProcessorSpanQueueSize) Name() string { + return "otel.sdk.processor.span.queue.size" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKProcessorSpanQueueSize) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKProcessorSpanQueueSize) Description() string { + return "The number of spans in the queue of a given instance of an SDK span processor." +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. 
It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKProcessorSpanQueueSize) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKProcessorSpanQueueSize) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKSpanLive is an instrument used to record metric values conforming to the +// "otel.sdk.span.live" semantic conventions. It represents the number of created +// spans with `recording=true` for which the end operation has not been called +// yet. +type SDKSpanLive struct { + metric.Int64UpDownCounter +} + +var newSDKSpanLiveOpts = []metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of created spans with `recording=true` for which the end operation has not been called yet."), + metric.WithUnit("{span}"), +} + +// NewSDKSpanLive returns a new SDKSpanLive instrument. +func NewSDKSpanLive( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (SDKSpanLive, error) { + // Check if the meter is nil. + if m == nil { + return SDKSpanLive{noop.Int64UpDownCounter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKSpanLiveOpts + } else { + opt = append(opt, newSDKSpanLiveOpts...) + } + + i, err := m.Int64UpDownCounter( + "otel.sdk.span.live", + opt..., + ) + if err != nil { + return SDKSpanLive{noop.Int64UpDownCounter{}}, err + } + return SDKSpanLive{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKSpanLive) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (SDKSpanLive) Name() string { + return "otel.sdk.span.live" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKSpanLive) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKSpanLive) Description() string { + return "The number of created spans with `recording=true` for which the end operation has not been called yet." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m SDKSpanLive) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m SDKSpanLive) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrSpanSamplingResult returns an optional attribute for the +// "otel.span.sampling_result" semantic convention. It represents the result +// value of the sampler for this span. +func (SDKSpanLive) AttrSpanSamplingResult(val SpanSamplingResultAttr) attribute.KeyValue { + return attribute.String("otel.span.sampling_result", string(val)) +} + +// SDKSpanStarted is an instrument used to record metric values conforming to the +// "otel.sdk.span.started" semantic conventions. It represents the number of +// created spans. 
+type SDKSpanStarted struct { + metric.Int64Counter +} + +var newSDKSpanStartedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of created spans."), + metric.WithUnit("{span}"), +} + +// NewSDKSpanStarted returns a new SDKSpanStarted instrument. +func NewSDKSpanStarted( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKSpanStarted, error) { + // Check if the meter is nil. + if m == nil { + return SDKSpanStarted{noop.Int64Counter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKSpanStartedOpts + } else { + opt = append(opt, newSDKSpanStartedOpts...) + } + + i, err := m.Int64Counter( + "otel.sdk.span.started", + opt..., + ) + if err != nil { + return SDKSpanStarted{noop.Int64Counter{}}, err + } + return SDKSpanStarted{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKSpanStarted) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (SDKSpanStarted) Name() string { + return "otel.sdk.span.started" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKSpanStarted) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKSpanStarted) Description() string { + return "The number of created spans." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// Implementations MUST record this metric for all spans, even for non-recording +// ones. +func (m SDKSpanStarted) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) 
+} + +// AddSet adds incr to the existing count for set. +// +// Implementations MUST record this metric for all spans, even for non-recording +// ones. +func (m SDKSpanStarted) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrSpanParentOrigin returns an optional attribute for the +// "otel.span.parent.origin" semantic convention. It represents the determines +// whether the span has a parent span, and if so, [whether it is a remote parent] +// . +// +// [whether it is a remote parent]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote +func (SDKSpanStarted) AttrSpanParentOrigin(val SpanParentOriginAttr) attribute.KeyValue { + return attribute.String("otel.span.parent.origin", string(val)) +} + +// AttrSpanSamplingResult returns an optional attribute for the +// "otel.span.sampling_result" semantic convention. It represents the result +// value of the sampler for this span. +func (SDKSpanStarted) AttrSpanSamplingResult(val SpanSamplingResultAttr) attribute.KeyValue { + return attribute.String("otel.span.sampling_result", string(val)) +} diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/trace/trace.go b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/trace/trace.go index 96c06ec32..e3d103c4b 100644 --- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/trace/trace.go +++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/trace/trace.go @@ -196,6 +196,20 @@ func (tf TraceFlags) WithSampled(sampled bool) TraceFlags { // nolint:revive // return tf &^ FlagsSampled } +// IsRandom reports whether the random bit is set in the TraceFlags. 
+func (tf TraceFlags) IsRandom() bool { + return tf&FlagsRandom == FlagsRandom +} + +// WithRandom sets the random bit in a new copy of the TraceFlags. +func (tf TraceFlags) WithRandom(random bool) TraceFlags { // nolint:revive // random is not a control flag. + if random { + return tf | FlagsRandom + } + + return tf &^ FlagsRandom +} + // MarshalJSON implements a custom marshal function to encode TraceFlags // as a hex string. func (tf TraceFlags) MarshalJSON() ([]byte, error) { @@ -322,6 +336,11 @@ func (sc SpanContext) IsSampled() bool { return sc.traceFlags.IsSampled() } +// IsRandom reports whether the random bit is set in the SpanContext's TraceFlags. +func (sc SpanContext) IsRandom() bool { + return sc.traceFlags.IsRandom() +} + // WithTraceFlags returns a new SpanContext with the TraceFlags replaced. func (sc SpanContext) WithTraceFlags(flags TraceFlags) SpanContext { return SpanContext{ diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/version.go b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/version.go index 2dae06f25..1db4f47e4 100644 --- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/version.go +++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/version.go @@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. 
func Version() string { - return "1.42.0" + return "1.43.0" } diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/versions.yaml b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/versions.yaml index 6c634ef31..bcc6ee78a 100644 --- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/versions.yaml @@ -3,7 +3,7 @@ module-sets: stable-v1: - version: v1.42.0 + version: v1.43.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus @@ -22,11 +22,11 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.64.0 + version: v0.65.0 modules: - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.18.0 + version: v0.19.0 modules: - go.opentelemetry.io/otel/log - go.opentelemetry.io/otel/log/logtest @@ -64,3 +64,6 @@ modules: go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp: version-refs: - ./internal/version.go + go.opentelemetry.io/otel/exporters/stdout/stdoutlog: + version-refs: + - ./internal/version.go diff --git a/openshift/tests-extension/vendor/modules.txt b/openshift/tests-extension/vendor/modules.txt index 73d290653..5d047f5b2 100644 --- a/openshift/tests-extension/vendor/modules.txt +++ b/openshift/tests-extension/vendor/modules.txt @@ -33,7 +33,7 @@ github.com/evanphx/json-patch/v5/internal/json # github.com/felixge/httpsnoop v1.0.4 ## explicit; go 1.13 github.com/felixge/httpsnoop -# github.com/fxamacker/cbor/v2 v2.9.0 +# github.com/fxamacker/cbor/v2 v2.9.1 ## explicit; go 1.20 github.com/fxamacker/cbor/v2 # github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 @@ -344,7 +344,7 @@ go.opentelemetry.io/auto/sdk/internal/telemetry go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request 
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv -# go.opentelemetry.io/otel v1.42.0 +# go.opentelemetry.io/otel v1.43.0 ## explicit; go 1.25.0 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute @@ -362,6 +362,7 @@ go.opentelemetry.io/otel/semconv/v1.39.0 go.opentelemetry.io/otel/semconv/v1.39.0/httpconv go.opentelemetry.io/otel/semconv/v1.39.0/otelconv go.opentelemetry.io/otel/semconv/v1.40.0 +go.opentelemetry.io/otel/semconv/v1.40.0/otelconv # go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 ## explicit; go 1.24.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace @@ -376,13 +377,13 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x -# go.opentelemetry.io/otel/metric v1.42.0 +# go.opentelemetry.io/otel/metric v1.43.0 ## explicit; go 1.25.0 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/embedded go.opentelemetry.io/otel/metric/noop -# go.opentelemetry.io/otel/sdk v1.40.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/otel/sdk v1.43.0 +## explicit; go 1.25.0 go.opentelemetry.io/otel/sdk go.opentelemetry.io/otel/sdk/instrumentation go.opentelemetry.io/otel/sdk/internal/x @@ -390,7 +391,7 @@ go.opentelemetry.io/otel/sdk/resource go.opentelemetry.io/otel/sdk/trace go.opentelemetry.io/otel/sdk/trace/internal/env go.opentelemetry.io/otel/sdk/trace/internal/observ -# go.opentelemetry.io/otel/trace v1.42.0 +# go.opentelemetry.io/otel/trace v1.43.0 ## explicit; go 1.25.0 go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded @@ -496,7 +497,7 @@ golang.org/x/tools/go/ast/inspector ## explicit; go 1.24.0 google.golang.org/genproto/googleapis/api/expr/v1alpha1 google.golang.org/genproto/googleapis/api/httpbody -# 
google.golang.org/genproto/googleapis/rpc v0.0.0-20260217215200-42d3e9bedb6d +# google.golang.org/genproto/googleapis/rpc v0.0.0-20260319201613-d00831a3d3e7 ## explicit; go 1.25.0 google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status @@ -765,7 +766,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.35.2 => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20251108023427-891f5bb03061 +# k8s.io/apiserver v0.35.3 => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20251108023427-891f5bb03061 ## explicit; go 1.24.0 k8s.io/apiserver/pkg/admission k8s.io/apiserver/pkg/admission/configuration @@ -1107,7 +1108,7 @@ k8s.io/client-go/util/homedir k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/workqueue -# k8s.io/component-base v0.35.2 => github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20251108023427-891f5bb03061 +# k8s.io/component-base v0.35.3 => github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20251108023427-891f5bb03061 ## explicit; go 1.24.0 k8s.io/component-base/cli/flag k8s.io/component-base/compatibility diff --git a/requirements.txt b/requirements.txt index e2076a4de..b71a7a608 100644 --- a/requirements.txt +++ b/requirements.txt @@ -27,8 +27,8 @@ python-dateutil==2.9.0.post0 PyYAML==6.0.3 pyyaml_env_tag==1.1 readtime==3.0.0 -regex==2026.2.28 -requests==2.33.0 +regex==2026.3.32 +requests==2.33.1 six==1.17.0 soupsieve==2.8.3 urllib3==2.6.3 diff --git a/test/e2e/features/revision.feature b/test/e2e/features/revision.feature index dd6d9e940..38ed16027 100644 --- a/test/e2e/features/revision.feature +++ b/test/e2e/features/revision.feature @@ -442,4 +442,162 @@ Feature: Install ClusterObjectSet Then ClusterObjectSet "${COS_NAME}" reports 
Progressing as True with Reason Succeeded And ClusterObjectSet "${COS_NAME}" reports Available as True with Reason ProbesSucceeded And resource "configmap/test-configmap-ref" is installed - And resource "deployment/test-httpd" is installed \ No newline at end of file + And resource "deployment/test-httpd" is installed + And ClusterObjectSet "${COS_NAME}" has observed phase "resources" with a non-empty digest + + Scenario: ClusterObjectSet blocks reconciliation when referenced Secret is mutable + Given ServiceAccount "olm-sa" with needed permissions is available in test namespace + And resource is applied + """ + apiVersion: v1 + kind: Secret + metadata: + name: ${COS_NAME}-mutable-secret + namespace: ${TEST_NAMESPACE} + type: olm.operatorframework.io/object-data + stringData: + configmap: | + { + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": { + "name": "test-cm-mutable", + "namespace": "${TEST_NAMESPACE}" + }, + "data": { + "key": "value" + } + } + """ + When ClusterObjectSet is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterObjectSet + metadata: + annotations: + olm.operatorframework.io/service-account-name: olm-sa + olm.operatorframework.io/service-account-namespace: ${TEST_NAMESPACE} + name: ${COS_NAME} + spec: + lifecycleState: Active + collisionProtection: Prevent + phases: + - name: resources + objects: + - ref: + name: ${COS_NAME}-mutable-secret + namespace: ${TEST_NAMESPACE} + key: configmap + revision: 1 + """ + Then ClusterObjectSet "${COS_NAME}" reports Progressing as False with Reason Blocked and Message: + """ + the following secrets are not immutable (referenced secrets must have immutable set to true): ${TEST_NAMESPACE}/${COS_NAME}-mutable-secret + """ + + Scenario: ClusterObjectSet blocks reconciliation when referenced Secret content changes + Given ServiceAccount "olm-sa" with needed permissions is available in test namespace + When resource is applied + """ + apiVersion: v1 + kind: Secret + metadata: + name: 
${COS_NAME}-change-secret + namespace: ${TEST_NAMESPACE} + immutable: true + type: olm.operatorframework.io/object-data + stringData: + configmap: | + { + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": { + "name": "test-cm-change", + "namespace": "${TEST_NAMESPACE}" + }, + "data": { + "key": "original-value" + } + } + """ + And ClusterObjectSet is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterObjectSet + metadata: + annotations: + olm.operatorframework.io/service-account-name: olm-sa + olm.operatorframework.io/service-account-namespace: ${TEST_NAMESPACE} + name: ${COS_NAME} + spec: + lifecycleState: Active + collisionProtection: Prevent + phases: + - name: resources + objects: + - ref: + name: ${COS_NAME}-change-secret + namespace: ${TEST_NAMESPACE} + key: configmap + revision: 1 + """ + Then ClusterObjectSet "${COS_NAME}" reports Progressing as True with Reason Succeeded + And ClusterObjectSet "${COS_NAME}" reports Available as True with Reason ProbesSucceeded + And ClusterObjectSet "${COS_NAME}" has observed phase "resources" with a non-empty digest + # Delete the immutable Secret and recreate with different content + When resource "secret/${COS_NAME}-change-secret" is removed + And resource is applied + """ + apiVersion: v1 + kind: Secret + metadata: + name: ${COS_NAME}-change-secret + namespace: ${TEST_NAMESPACE} + immutable: true + type: olm.operatorframework.io/object-data + stringData: + configmap: | + { + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": { + "name": "test-cm-change", + "namespace": "${TEST_NAMESPACE}" + }, + "data": { + "key": "TAMPERED-value" + } + } + """ + And ClusterObjectSet "${COS_NAME}" reconciliation is triggered + Then ClusterObjectSet "${COS_NAME}" reports Progressing as False with Reason Blocked and Message includes: + """ + resolved content of 1 phase(s) has changed: phase "resources" + """ + # Restore original content — COS should recover + When resource 
"secret/${COS_NAME}-change-secret" is removed + And resource is applied + """ + apiVersion: v1 + kind: Secret + metadata: + name: ${COS_NAME}-change-secret + namespace: ${TEST_NAMESPACE} + immutable: true + type: olm.operatorframework.io/object-data + stringData: + configmap: | + { + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": { + "name": "test-cm-change", + "namespace": "${TEST_NAMESPACE}" + }, + "data": { + "key": "original-value" + } + } + """ + And ClusterObjectSet "${COS_NAME}" reconciliation is triggered + Then ClusterObjectSet "${COS_NAME}" reports Progressing as True with Reason Succeeded diff --git a/test/e2e/steps/steps.go b/test/e2e/steps/steps.go index 2b5603ab7..c9748d99d 100644 --- a/test/e2e/steps/steps.go +++ b/test/e2e/steps/steps.go @@ -90,8 +90,11 @@ func RegisterSteps(sc *godog.ScenarioContext) { sc.Step(`^(?i)ClusterExtension reports ([[:alnum:]]+) as ([[:alnum:]]+)$`, ClusterExtensionReportsConditionWithoutReason) sc.Step(`^(?i)ClusterObjectSet "([^"]+)" reports ([[:alnum:]]+) as ([[:alnum:]]+) with Reason ([[:alnum:]]+)$`, ClusterObjectSetReportsConditionWithoutMsg) sc.Step(`^(?i)ClusterObjectSet "([^"]+)" reports ([[:alnum:]]+) as ([[:alnum:]]+) with Reason ([[:alnum:]]+) and Message:$`, ClusterObjectSetReportsConditionWithMsg) + sc.Step(`^(?i)ClusterObjectSet "([^"]+)" reports ([[:alnum:]]+) as ([[:alnum:]]+) with Reason ([[:alnum:]]+) and Message includes:$`, ClusterObjectSetReportsConditionWithMessageFragment) sc.Step(`^(?i)ClusterExtension reports ([[:alnum:]]+) transition between (\d+) and (\d+) minutes since its creation$`, ClusterExtensionReportsConditionTransitionTime) sc.Step(`^(?i)ClusterObjectSet is applied(?:\s+.*)?$`, ResourceIsApplied) + sc.Step(`^(?i)ClusterObjectSet "([^"]+)" reconciliation is triggered$`, TriggerClusterObjectSetReconciliation) + sc.Step(`^(?i)ClusterObjectSet "([^"]+)" has observed phase "([^"]+)" with a non-empty digest$`, ClusterObjectSetHasObservedPhase) sc.Step(`^(?i)ClusterObjectSet 
"([^"]+)" is archived$`, ClusterObjectSetIsArchived) sc.Step(`^(?i)ClusterObjectSet "([^"]+)" contains annotation "([^"]+)" with value$`, ClusterObjectSetHasAnnotationWithValue) sc.Step(`^(?i)ClusterObjectSet "([^"]+)" has label "([^"]+)" with value "([^"]+)"$`, ClusterObjectSetHasLabelWithValue) @@ -508,7 +511,8 @@ func ClusterExtensionReportsConditionWithMessageFragment(ctx context.Context, co if msgFragment != nil { expectedMsgFragment := substituteScenarioVars(strings.Join(strings.Fields(msgFragment.Content), " "), scenarioCtx(ctx)) msgCmp = func(actualMsg string) bool { - return strings.Contains(actualMsg, expectedMsgFragment) + normalizedActual := strings.Join(strings.Fields(actualMsg), " ") + return strings.Contains(normalizedActual, expectedMsgFragment) } } return waitForExtensionCondition(ctx, conditionType, conditionStatus, &conditionReason, msgCmp) @@ -597,6 +601,48 @@ func ClusterObjectSetReportsConditionWithMsg(ctx context.Context, revisionName, return waitForCondition(ctx, "clusterobjectset", substituteScenarioVars(revisionName, scenarioCtx(ctx)), conditionType, conditionStatus, &conditionReason, messageComparison(ctx, msg)) } +// ClusterObjectSetReportsConditionWithMessageFragment waits for the named ClusterObjectSet to have a condition +// matching type, status, reason, with a message containing the specified fragment. Polls with timeout. 
+func ClusterObjectSetReportsConditionWithMessageFragment(ctx context.Context, revisionName, conditionType, conditionStatus, conditionReason string, msgFragment *godog.DocString) error { + msgCmp := alwaysMatch + if msgFragment != nil { + expectedMsgFragment := substituteScenarioVars(strings.Join(strings.Fields(msgFragment.Content), " "), scenarioCtx(ctx)) + msgCmp = func(actualMsg string) bool { + normalizedActual := strings.Join(strings.Fields(actualMsg), " ") + return strings.Contains(normalizedActual, expectedMsgFragment) + } + } + return waitForCondition(ctx, "clusterobjectset", substituteScenarioVars(revisionName, scenarioCtx(ctx)), conditionType, conditionStatus, &conditionReason, msgCmp) +} + +// TriggerClusterObjectSetReconciliation annotates the named ClusterObjectSet +// to trigger a new reconciliation cycle. +func TriggerClusterObjectSetReconciliation(ctx context.Context, cosName string) error { + sc := scenarioCtx(ctx) + cosName = substituteScenarioVars(cosName, sc) + _, err := k8sClient("annotate", "clusterobjectset", cosName, "--overwrite", + fmt.Sprintf("e2e-trigger=%d", time.Now().UnixNano())) + return err +} + +// ClusterObjectSetHasObservedPhase waits for the named ClusterObjectSet to have +// an observedPhases entry matching the given phase name with a non-empty digest. Polls with timeout. +func ClusterObjectSetHasObservedPhase(ctx context.Context, cosName, phaseName string) error { + sc := scenarioCtx(ctx) + cosName = substituteScenarioVars(cosName, sc) + phaseName = substituteScenarioVars(phaseName, sc) + + waitFor(ctx, func() bool { + out, err := k8sClient("get", "clusterobjectset", cosName, "-o", + fmt.Sprintf(`jsonpath={.status.observedPhases[?(@.name=="%s")].digest}`, phaseName)) + if err != nil { + return false + } + return strings.TrimSpace(out) != "" + }) + return nil +} + // ClusterObjectSetIsArchived waits for the named ClusterObjectSet to have Progressing=False // with reason Archived. Polls with timeout. 
func ClusterObjectSetIsArchived(ctx context.Context, revisionName string) error { diff --git a/vendor/github.com/fxamacker/cbor/v2/.golangci.yml b/vendor/github.com/fxamacker/cbor/v2/.golangci.yml index 38cb9ae10..08081fbde 100644 --- a/vendor/github.com/fxamacker/cbor/v2/.golangci.yml +++ b/vendor/github.com/fxamacker/cbor/v2/.golangci.yml @@ -1,104 +1,116 @@ -# Do not delete linter settings. Linters like gocritic can be enabled on the command line. - -linters-settings: - depguard: - rules: - prevent_unmaintained_packages: - list-mode: strict - files: - - $all - - "!$test" - allow: - - $gostd - - github.com/x448/float16 - deny: - - pkg: io/ioutil - desc: "replaced by io and os packages since Go 1.16: https://tip.golang.org/doc/go1.16#ioutil" - dupl: - threshold: 100 - funlen: - lines: 100 - statements: 50 - goconst: - ignore-tests: true - min-len: 2 - min-occurrences: 3 - gocritic: - enabled-tags: - - diagnostic - - experimental - - opinionated - - performance - - style - disabled-checks: - - commentedOutCode - - dupImport # https://github.com/go-critic/go-critic/issues/845 - - ifElseChain - - octalLiteral - - paramTypeCombine - - whyNoLint - gofmt: - simplify: false - goimports: - local-prefixes: github.com/fxamacker/cbor - golint: - min-confidence: 0 - govet: - check-shadowing: true - lll: - line-length: 140 - maligned: - suggest-new: true - misspell: - locale: US - staticcheck: - checks: ["all"] - +version: "2" linters: - disable-all: true + default: none enable: - asciicheck - bidichk - depguard - errcheck - - exportloopref + - forbidigo - goconst - gocritic - gocyclo - - gofmt - - goimports - goprintffuncname - gosec - - gosimple - govet - ineffassign - misspell - nilerr - revive - staticcheck - - stylecheck - - typecheck - unconvert - unused - + settings: + depguard: + rules: + prevent_unmaintained_packages: + list-mode: strict + files: + - $all + - '!$test' + allow: + - $gostd + - github.com/x448/float16 + deny: + - pkg: io/ioutil + desc: 'replaced by io 
and os packages since Go 1.16: https://tip.golang.org/doc/go1.16#ioutil' + dupl: + threshold: 100 + funlen: + lines: 100 + statements: 50 + goconst: + min-len: 2 + min-occurrences: 3 + gocritic: + disabled-checks: + - commentedOutCode + - dupImport + - ifElseChain + - octalLiteral + - paramTypeCombine + - whyNoLint + enabled-tags: + - diagnostic + - experimental + - opinionated + - performance + - style + govet: + enable: + - shadow + lll: + line-length: 140 + misspell: + locale: US + staticcheck: + checks: + - all + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + rules: + - path: decode.go + text: string ` overflows ` has (\d+) occurrences, make it a constant + - path: decode.go + text: string ` \(range is \[` has (\d+) occurrences, make it a constant + - path: decode.go + text: string `, ` has (\d+) occurrences, make it a constant + - path: decode.go + text: string ` overflows Go's int64` has (\d+) occurrences, make it a constant + - path: decode.go + text: string `\]\)` has (\d+) occurrences, make it a constant + - path: valid.go + text: string ` for type ` has (\d+) occurrences, make it a constant + - path: valid.go + text: 'string `cbor: ` has (\d+) occurrences, make it a constant' + - linters: + - goconst + path: (.+)_test\.go + paths: + - third_party$ + - builtin$ + - examples$ issues: - # max-issues-per-linter default is 50. Set to 0 to disable limit. max-issues-per-linter: 0 - # max-same-issues default is 3. Set to 0 to disable limit. 
max-same-issues: 0 - - exclude-rules: - - path: decode.go - text: "string ` overflows ` has (\\d+) occurrences, make it a constant" - - path: decode.go - text: "string ` \\(range is \\[` has (\\d+) occurrences, make it a constant" - - path: decode.go - text: "string `, ` has (\\d+) occurrences, make it a constant" - - path: decode.go - text: "string ` overflows Go's int64` has (\\d+) occurrences, make it a constant" - - path: decode.go - text: "string `\\]\\)` has (\\d+) occurrences, make it a constant" - - path: valid.go - text: "string ` for type ` has (\\d+) occurrences, make it a constant" - - path: valid.go - text: "string `cbor: ` has (\\d+) occurrences, make it a constant" +formatters: + enable: + - gofmt + - goimports + settings: + gofmt: + simplify: false + goimports: + local-prefixes: + - github.com/fxamacker/cbor + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/vendor/github.com/fxamacker/cbor/v2/README.md b/vendor/github.com/fxamacker/cbor/v2/README.md index d072b81c7..f9ae78ec9 100644 --- a/vendor/github.com/fxamacker/cbor/v2/README.md +++ b/vendor/github.com/fxamacker/cbor/v2/README.md @@ -702,21 +702,20 @@ Default limits may need to be increased for systems handling very large data (e. ## Status -[v2.9.0](https://github.com/fxamacker/cbor/releases/tag/v2.9.0) (Jul 13, 2025) improved interoperability/transcoding between CBOR & JSON, refactored tests, and improved docs. -- Add opt-in support for `encoding.TextMarshaler` and `encoding.TextUnmarshaler` to encode and decode from CBOR text string. -- Add opt-in support for `json.Marshaler` and `json.Unmarshaler` via user-provided transcoding function. -- Update docs for TimeMode, Tag, RawTag, and add example for Embedded JSON Tag for CBOR. +v2.9.1 (Mar 29-30, 2026) includes important bugfixes, defensive checks, improved code quality, and more tests. Although not public, the fuzzer was also improved by adding more fuzz tests. 
-v2.9.0 passed fuzz tests and is production quality. +v2.9.1 passed fuzz tests and is production quality. The minimum version of Go required to build: - v2.8.0 and newer releases require go 1.20+. - v2.7.1 and older releases require go 1.17+. -For more details, see [release notes](https://github.com/fxamacker/cbor/releases). +For more details, see [v2.9.1 release notes](https://github.com/fxamacker/cbor/releases). ### Prior Releases +[v2.9.0](https://github.com/fxamacker/cbor/releases/tag/v2.9.0) (Jul 13, 2025) improved interoperability/transcoding between CBOR & JSON, refactored tests, and improved docs. It passed fuzz tests (billions of executions) and is production quality. + [v2.8.0](https://github.com/fxamacker/cbor/releases/tag/v2.8.0) (March 30, 2025) is a small release primarily to add `omitzero` option to struct field tags and fix bugs. It passed fuzz tests (billions of executions) and is production quality. [v2.7.0](https://github.com/fxamacker/cbor/releases/tag/v2.7.0) (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality. 
diff --git a/vendor/github.com/fxamacker/cbor/v2/cache.go b/vendor/github.com/fxamacker/cbor/v2/cache.go index 5051f110f..5743f3eb2 100644 --- a/vendor/github.com/fxamacker/cbor/v2/cache.go +++ b/vendor/github.com/fxamacker/cbor/v2/cache.go @@ -92,94 +92,126 @@ func newTypeInfo(t reflect.Type) *typeInfo { } type decodingStructType struct { - fields fields - fieldIndicesByName map[string]int - err error - toArray bool + fields decodingFields + fieldIndicesByName map[string]int // Only populated if toArray is false + fieldIndicesByIntKey map[int64]int // Only populated if toArray is false + err error + toArray bool } -// The stdlib errors.Join was introduced in Go 1.20, and we still support Go 1.17, so instead, -// here's a very basic implementation of an aggregated error. -type multierror []error - -func (m multierror) Error() string { - var sb strings.Builder - for i, err := range m { - sb.WriteString(err.Error()) - if i < len(m)-1 { - sb.WriteString(", ") - } - } - return sb.String() -} - -func getDecodingStructType(t reflect.Type) *decodingStructType { +func getDecodingStructType(t reflect.Type) (*decodingStructType, error) { if v, _ := decodingStructTypeCache.Load(t); v != nil { - return v.(*decodingStructType) + structType := v.(*decodingStructType) + if structType.err != nil { + return nil, structType.err + } + return structType, nil } flds, structOptions := getFields(t) toArray := hasToArrayOption(structOptions) - var errs []error - for i := 0; i < len(flds); i++ { - if flds[i].keyAsInt { - nameAsInt, numErr := strconv.Atoi(flds[i].name) - if numErr != nil { - errs = append(errs, errors.New("cbor: failed to parse field name \""+flds[i].name+"\" to int ("+numErr.Error()+")")) - break + if toArray { + return getDecodingStructToArrayType(t, flds) + } + + fieldIndicesByName := make(map[string]int, len(flds)) + var fieldIndicesByIntKey map[int64]int + + decFlds := make(decodingFields, len(flds)) + for i, f := range flds { + // nameAsInt is set in getFields() 
except for fields with an unparsable tagged name. + // Atoi() is called here to catch and save parsing errors. + if f.keyAsInt && f.nameAsInt == 0 { + if _, numErr := strconv.Atoi(f.name); numErr != nil { + structType := &decodingStructType{ + err: errors.New("cbor: failed to parse field name \"" + f.name + "\" to int (" + numErr.Error() + ")"), + } + decodingStructTypeCache.Store(t, structType) + return nil, structType.err } - flds[i].nameAsInt = int64(nameAsInt) } - flds[i].typInfo = getTypeInfo(flds[i].typ) - } + if f.keyAsInt { + if fieldIndicesByIntKey == nil { + fieldIndicesByIntKey = make(map[int64]int, len(flds)) + } + // The duplication check is only a safeguard, since getFields() already deduplicates fields. + if _, ok := fieldIndicesByIntKey[f.nameAsInt]; ok { + structType := &decodingStructType{ + err: fmt.Errorf("cbor: two or more fields of %v have the same keyasint value %d", t, f.nameAsInt), + } + decodingStructTypeCache.Store(t, structType) + return nil, structType.err + } + fieldIndicesByIntKey[f.nameAsInt] = i + } else { + // The duplication check is only a safeguard, since getFields() already deduplicates fields. 
+ if _, ok := fieldIndicesByName[f.name]; ok { + structType := &decodingStructType{ + err: fmt.Errorf("cbor: two or more fields of %v have the same name %q", t, f.name), + } + decodingStructTypeCache.Store(t, structType) + return nil, structType.err + } + fieldIndicesByName[f.name] = i + } - fieldIndicesByName := make(map[string]int, len(flds)) - for i, fld := range flds { - if _, ok := fieldIndicesByName[fld.name]; ok { - errs = append(errs, fmt.Errorf("cbor: two or more fields of %v have the same name %q", t, fld.name)) - continue + decFlds[i] = &decodingField{ + field: *f, + typInfo: getTypeInfo(f.typ), } - fieldIndicesByName[fld.name] = i } - var err error - { - var multi multierror - for _, each := range errs { - if each != nil { - multi = append(multi, each) + structType := &decodingStructType{ + fields: decFlds, + fieldIndicesByName: fieldIndicesByName, + fieldIndicesByIntKey: fieldIndicesByIntKey, + } + decodingStructTypeCache.Store(t, structType) + return structType, nil +} + +func getDecodingStructToArrayType(t reflect.Type, flds fields) (*decodingStructType, error) { + decFlds := make(decodingFields, len(flds)) + for i, f := range flds { + // nameAsInt is set in getFields() except for fields with an unparsable tagged name. + // Atoi() is called here to catch and save parsing errors. 
+ if f.keyAsInt && f.nameAsInt == 0 { + if _, numErr := strconv.Atoi(f.name); numErr != nil { + structType := &decodingStructType{ + err: errors.New("cbor: failed to parse field name \"" + f.name + "\" to int (" + numErr.Error() + ")"), + } + decodingStructTypeCache.Store(t, structType) + return nil, structType.err } } - if len(multi) == 1 { - err = multi[0] - } else if len(multi) > 1 { - err = multi + + decFlds[i] = &decodingField{ + field: *f, + typInfo: getTypeInfo(f.typ), } } structType := &decodingStructType{ - fields: flds, - fieldIndicesByName: fieldIndicesByName, - err: err, - toArray: toArray, + fields: decFlds, + toArray: true, } decodingStructTypeCache.Store(t, structType) - return structType + return structType, nil } type encodingStructType struct { - fields fields - bytewiseFields fields - lengthFirstFields fields - omitEmptyFieldsIdx []int + fields encodingFields + bytewiseFields encodingFields // Only populated if toArray is false + lengthFirstFields encodingFields // Only populated if toArray is false + omitEmptyFieldsIdx []int // Only populated if toArray is false err error toArray bool } -func (st *encodingStructType) getFields(em *encMode) fields { +func (st *encodingStructType) getFields(em *encMode) encodingFields { switch em.sort { case SortNone, SortFastShuffle: return st.fields @@ -191,7 +223,7 @@ func (st *encodingStructType) getFields(em *encMode) fields { } type bytewiseFieldSorter struct { - fields fields + fields encodingFields } func (x *bytewiseFieldSorter) Len() int { @@ -203,11 +235,11 @@ func (x *bytewiseFieldSorter) Swap(i, j int) { } func (x *bytewiseFieldSorter) Less(i, j int) bool { - return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) <= 0 + return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) < 0 } type lengthFirstFieldSorter struct { - fields fields + fields encodingFields } func (x *lengthFirstFieldSorter) Len() int { @@ -222,13 +254,16 @@ func (x *lengthFirstFieldSorter) Less(i, j int) bool { if 
len(x.fields[i].cborName) != len(x.fields[j].cborName) { return len(x.fields[i].cborName) < len(x.fields[j].cborName) } - return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) <= 0 + return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) < 0 } func getEncodingStructType(t reflect.Type) (*encodingStructType, error) { if v, _ := encodingStructTypeCache.Load(t); v != nil { structType := v.(*encodingStructType) - return structType, structType.err + if structType.err != nil { + return nil, structType.err + } + return structType, nil } flds, structOptions := getFields(t) @@ -237,111 +272,119 @@ func getEncodingStructType(t reflect.Type) (*encodingStructType, error) { return getEncodingStructToArrayType(t, flds) } - var err error var hasKeyAsInt bool var hasKeyAsStr bool var omitEmptyIdx []int + + encFlds := make(encodingFields, len(flds)) + e := getEncodeBuffer() - for i := 0; i < len(flds); i++ { + defer putEncodeBuffer(e) + + for i, f := range flds { + encFlds[i] = &encodingField{field: *f} + ef := encFlds[i] + // Get field's encodeFunc - flds[i].ef, flds[i].ief, flds[i].izf = getEncodeFunc(flds[i].typ) - if flds[i].ef == nil { - err = &UnsupportedTypeError{t} - break + ef.ef, ef.ief, ef.izf = getEncodeFunc(f.typ) + if ef.ef == nil { + structType := &encodingStructType{err: &UnsupportedTypeError{t}} + encodingStructTypeCache.Store(t, structType) + return nil, structType.err } // Encode field name - if flds[i].keyAsInt { - nameAsInt, numErr := strconv.Atoi(flds[i].name) - if numErr != nil { - err = errors.New("cbor: failed to parse field name \"" + flds[i].name + "\" to int (" + numErr.Error() + ")") - break + if f.keyAsInt { + if f.nameAsInt == 0 { + // nameAsInt is set in getFields() except for fields with an unparsable tagged name. + // Atoi() is called here to catch and save parsing errors. 
+ if _, numErr := strconv.Atoi(f.name); numErr != nil { + structType := &encodingStructType{ + err: errors.New("cbor: failed to parse field name \"" + f.name + "\" to int (" + numErr.Error() + ")"), + } + encodingStructTypeCache.Store(t, structType) + return nil, structType.err + } } - flds[i].nameAsInt = int64(nameAsInt) + nameAsInt := f.nameAsInt if nameAsInt >= 0 { - encodeHead(e, byte(cborTypePositiveInt), uint64(nameAsInt)) + encodeHead(e, byte(cborTypePositiveInt), uint64(nameAsInt)) //nolint:gosec } else { n := nameAsInt*(-1) - 1 - encodeHead(e, byte(cborTypeNegativeInt), uint64(n)) + encodeHead(e, byte(cborTypeNegativeInt), uint64(n)) //nolint:gosec } - flds[i].cborName = make([]byte, e.Len()) - copy(flds[i].cborName, e.Bytes()) + ef.cborName = make([]byte, e.Len()) + copy(ef.cborName, e.Bytes()) e.Reset() hasKeyAsInt = true } else { - encodeHead(e, byte(cborTypeTextString), uint64(len(flds[i].name))) - flds[i].cborName = make([]byte, e.Len()+len(flds[i].name)) - n := copy(flds[i].cborName, e.Bytes()) - copy(flds[i].cborName[n:], flds[i].name) + encodeHead(e, byte(cborTypeTextString), uint64(len(f.name))) + ef.cborName = make([]byte, e.Len()+len(f.name)) + n := copy(ef.cborName, e.Bytes()) + copy(ef.cborName[n:], f.name) e.Reset() // If cborName contains a text string, then cborNameByteString contains a // string that has the byte string major type but is otherwise identical to // cborName. 
- flds[i].cborNameByteString = make([]byte, len(flds[i].cborName)) - copy(flds[i].cborNameByteString, flds[i].cborName) + ef.cborNameByteString = make([]byte, len(ef.cborName)) + copy(ef.cborNameByteString, ef.cborName) // Reset encoded CBOR type to byte string, preserving the "additional // information" bits: - flds[i].cborNameByteString[0] = byte(cborTypeByteString) | - getAdditionalInformation(flds[i].cborNameByteString[0]) + ef.cborNameByteString[0] = byte(cborTypeByteString) | + getAdditionalInformation(ef.cborNameByteString[0]) hasKeyAsStr = true } // Check if field can be omitted when empty - if flds[i].omitEmpty { + if f.omitEmpty { omitEmptyIdx = append(omitEmptyIdx, i) } } - putEncodeBuffer(e) - - if err != nil { - structType := &encodingStructType{err: err} - encodingStructTypeCache.Store(t, structType) - return structType, structType.err - } // Sort fields by canonical order - bytewiseFields := make(fields, len(flds)) - copy(bytewiseFields, flds) + bytewiseFields := make(encodingFields, len(encFlds)) + copy(bytewiseFields, encFlds) sort.Sort(&bytewiseFieldSorter{bytewiseFields}) lengthFirstFields := bytewiseFields if hasKeyAsInt && hasKeyAsStr { - lengthFirstFields = make(fields, len(flds)) - copy(lengthFirstFields, flds) + lengthFirstFields = make(encodingFields, len(encFlds)) + copy(lengthFirstFields, encFlds) sort.Sort(&lengthFirstFieldSorter{lengthFirstFields}) } structType := &encodingStructType{ - fields: flds, + fields: encFlds, bytewiseFields: bytewiseFields, lengthFirstFields: lengthFirstFields, omitEmptyFieldsIdx: omitEmptyIdx, } encodingStructTypeCache.Store(t, structType) - return structType, structType.err + return structType, nil } func getEncodingStructToArrayType(t reflect.Type, flds fields) (*encodingStructType, error) { - for i := 0; i < len(flds); i++ { - // Get field's encodeFunc - flds[i].ef, flds[i].ief, flds[i].izf = getEncodeFunc(flds[i].typ) - if flds[i].ef == nil { + encFlds := make(encodingFields, len(flds)) + for i, f := 
range flds { + encFlds[i] = &encodingField{field: *f} + encFlds[i].ef, encFlds[i].ief, encFlds[i].izf = getEncodeFunc(f.typ) + if encFlds[i].ef == nil { structType := &encodingStructType{err: &UnsupportedTypeError{t}} encodingStructTypeCache.Store(t, structType) - return structType, structType.err + return nil, structType.err } } structType := &encodingStructType{ - fields: flds, + fields: encFlds, toArray: true, } encodingStructTypeCache.Store(t, structType) - return structType, structType.err + return structType, nil } func getEncodeFunc(t reflect.Type) (encodeFunc, isEmptyFunc, isZeroFunc) { diff --git a/vendor/github.com/fxamacker/cbor/v2/decode.go b/vendor/github.com/fxamacker/cbor/v2/decode.go index f0bdc3b38..03fd7f8b0 100644 --- a/vendor/github.com/fxamacker/cbor/v2/decode.go +++ b/vendor/github.com/fxamacker/cbor/v2/decode.go @@ -16,7 +16,6 @@ import ( "math/big" "reflect" "strconv" - "strings" "time" "unicode/utf8" @@ -326,14 +325,14 @@ func (dmkm DupMapKeyMode) valid() bool { return dmkm >= 0 && dmkm < maxDupMapKeyMode } -// IndefLengthMode specifies whether to allow indefinite length items. +// IndefLengthMode specifies whether to allow indefinite-length items. type IndefLengthMode int const ( - // IndefLengthAllowed allows indefinite length items. + // IndefLengthAllowed allows indefinite-length items. IndefLengthAllowed IndefLengthMode = iota - // IndefLengthForbidden disallows indefinite length items. + // IndefLengthForbidden disallows indefinite-length items. IndefLengthForbidden maxIndefLengthMode @@ -378,6 +377,7 @@ const ( // - int64 if value fits // - big.Int or *big.Int (see BigIntDecMode) if value < math.MinInt64 // - return UnmarshalTypeError if value > math.MaxInt64 + // // Deprecated: IntDecConvertSigned should not be used. // Please use other options, such as IntDecConvertSignedOrError, IntDecConvertSignedOrBigInt, IntDecConvertNone. 
IntDecConvertSigned @@ -811,7 +811,7 @@ type DecOptions struct { // Default is 128*1024=131072 and it can be set to [16, 2147483647] MaxMapPairs int - // IndefLength specifies whether to allow indefinite length CBOR items. + // IndefLength specifies whether to allow indefinite-length CBOR items. IndefLength IndefLengthMode // TagsMd specifies whether to allow CBOR tags (major type 6). @@ -1055,7 +1055,7 @@ func (opts DecOptions) decMode() (*decMode, error) { //nolint:gocritic // ignore } if !opts.ExtraReturnErrors.valid() { - return nil, errors.New("cbor: invalid ExtraReturnErrors " + strconv.Itoa(int(opts.ExtraReturnErrors))) + return nil, errors.New("cbor: invalid ExtraReturnErrors " + strconv.Itoa(int(opts.ExtraReturnErrors))) //nolint:gosec } if opts.DefaultMapType != nil && opts.DefaultMapType.Kind() != reflect.Map { @@ -1149,8 +1149,8 @@ func (opts DecOptions) decMode() (*decMode, error) { //nolint:gocritic // ignore unrecognizedTagToAny: opts.UnrecognizedTagToAny, timeTagToAny: opts.TimeTagToAny, simpleValues: simpleValues, - nanDec: opts.NaN, - infDec: opts.Inf, + nan: opts.NaN, + inf: opts.Inf, byteStringToTime: opts.ByteStringToTime, byteStringExpectedFormat: opts.ByteStringExpectedFormat, bignumTag: opts.BignumTag, @@ -1230,8 +1230,8 @@ type decMode struct { unrecognizedTagToAny UnrecognizedTagToAnyMode timeTagToAny TimeTagToAnyMode simpleValues *SimpleValueRegistry - nanDec NaNMode - infDec InfMode + nan NaNMode + inf InfMode byteStringToTime ByteStringToTimeMode byteStringExpectedFormat ByteStringExpectedFormatMode bignumTag BignumTagMode @@ -1272,8 +1272,8 @@ func (dm *decMode) DecOptions() DecOptions { UnrecognizedTagToAny: dm.unrecognizedTagToAny, TimeTagToAny: dm.timeTagToAny, SimpleValues: simpleValues, - NaN: dm.nanDec, - Inf: dm.infDec, + NaN: dm.nan, + Inf: dm.inf, ByteStringToTime: dm.byteStringToTime, ByteStringExpectedFormat: dm.byteStringExpectedFormat, BignumTag: dm.bignumTag, @@ -1583,11 +1583,11 @@ func (d *decoder) parseToValue(v 
reflect.Value, tInfo *typeInfo) error { //nolin _, ai, val := d.getHead() switch ai { case additionalInformationAsFloat16: - f := float64(float16.Frombits(uint16(val)).Float32()) + f := float64(float16.Frombits(uint16(val)).Float32()) //nolint:gosec return fillFloat(t, f, v) case additionalInformationAsFloat32: - f := float64(math.Float32frombits(uint32(val))) + f := float64(math.Float32frombits(uint32(val))) //nolint:gosec return fillFloat(t, f, v) case additionalInformationAsFloat64: @@ -1595,10 +1595,10 @@ func (d *decoder) parseToValue(v reflect.Value, tInfo *typeInfo) error { //nolin return fillFloat(t, f, v) default: // ai <= 24 - if d.dm.simpleValues.rejected[SimpleValue(val)] { + if d.dm.simpleValues.rejected[SimpleValue(val)] { //nolint:gosec return &UnacceptableDataItemError{ CBORType: t.String(), - Message: "simple value " + strconv.FormatInt(int64(val), 10) + " is not recognized", + Message: "simple value " + strconv.FormatInt(int64(val), 10) + " is not recognized", //nolint:gosec } } @@ -1677,20 +1677,23 @@ func (d *decoder) parseToValue(v reflect.Value, tInfo *typeInfo) error { //nolin return d.parseToValue(v, tInfo) case cborTypeArray: - if tInfo.nonPtrKind == reflect.Slice { + switch tInfo.nonPtrKind { + case reflect.Slice: return d.parseArrayToSlice(v, tInfo) - } else if tInfo.nonPtrKind == reflect.Array { + case reflect.Array: return d.parseArrayToArray(v, tInfo) - } else if tInfo.nonPtrKind == reflect.Struct { + case reflect.Struct: return d.parseArrayToStruct(v, tInfo) } + d.skip() return &UnmarshalTypeError{CBORType: t.String(), GoType: tInfo.nonPtrType.String()} case cborTypeMap: - if tInfo.nonPtrKind == reflect.Struct { + switch tInfo.nonPtrKind { + case reflect.Struct: return d.parseMapToStruct(v, tInfo) - } else if tInfo.nonPtrKind == reflect.Map { + case reflect.Map: return d.parseMapToMap(v, tInfo) } d.skip() @@ -1745,8 +1748,8 @@ func (d *decoder) parseToTime() (time.Time, bool, error) { // Read tag number _, _, tagNum := d.getHead() if 
tagNum != 0 && tagNum != 1 { - d.skip() // skip tag content - return time.Time{}, false, errors.New("cbor: wrong tag number for time.Time, got " + strconv.Itoa(int(tagNum)) + ", expect 0 or 1") + d.skip() // skip tag content + return time.Time{}, false, errors.New("cbor: wrong tag number for time.Time, got " + strconv.Itoa(int(tagNum)) + ", expect 0 or 1") //nolint:gosec } } } else { @@ -1815,10 +1818,10 @@ func (d *decoder) parseToTime() (time.Time, bool, error) { var f float64 switch ai { case additionalInformationAsFloat16: - f = float64(float16.Frombits(uint16(val)).Float32()) + f = float64(float16.Frombits(uint16(val)).Float32()) //nolint:gosec case additionalInformationAsFloat32: - f = float64(math.Float32frombits(uint32(val))) + f = float64(math.Float32frombits(uint32(val))) //nolint:gosec case additionalInformationAsFloat64: f = math.Float64frombits(val) @@ -1832,6 +1835,13 @@ func (d *decoder) parseToTime() (time.Time, bool, error) { return time.Time{}, true, nil } seconds, fractional := math.Modf(f) + if seconds > math.MaxInt64 || seconds < math.MinInt64 { + return time.Time{}, false, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: typeTime.String(), + errorMsg: fmt.Sprintf("%v overflows Go's int64", f), + } + } return time.Unix(int64(seconds), int64(fractional*1e9)), true, nil default: @@ -2145,14 +2155,14 @@ func (d *decoder) parse(skipSelfDescribedTag bool) (any, error) { //nolint:gocyc case cborTypePrimitives: _, ai, val := d.getHead() - if ai <= 24 && d.dm.simpleValues.rejected[SimpleValue(val)] { + if ai <= 24 && d.dm.simpleValues.rejected[SimpleValue(val)] { //nolint:gosec return nil, &UnacceptableDataItemError{ CBORType: t.String(), - Message: "simple value " + strconv.FormatInt(int64(val), 10) + " is not recognized", + Message: "simple value " + strconv.FormatInt(int64(val), 10) + " is not recognized", //nolint:gosec } } if ai < 20 || ai == 24 { - return SimpleValue(val), nil + return SimpleValue(val), nil //nolint:gosec } switch ai { @@ 
-2165,11 +2175,11 @@ func (d *decoder) parse(skipSelfDescribedTag bool) (any, error) { //nolint:gocyc return nil, nil case additionalInformationAsFloat16: - f := float64(float16.Frombits(uint16(val)).Float32()) + f := float64(float16.Frombits(uint16(val)).Float32()) //nolint:gosec return f, nil case additionalInformationAsFloat32: - f := float64(math.Float32frombits(uint32(val))) + f := float64(math.Float32frombits(uint32(val))) //nolint:gosec return f, nil case additionalInformationAsFloat64: @@ -2202,16 +2212,16 @@ func (d *decoder) parse(skipSelfDescribedTag bool) (any, error) { //nolint:gocyc func (d *decoder) parseByteString() ([]byte, bool) { _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() if !indefiniteLength { - b := d.data[d.off : d.off+int(val)] - d.off += int(val) + b := d.data[d.off : d.off+int(val)] //nolint:gosec + d.off += int(val) //nolint:gosec return b, false } - // Process indefinite length string chunks. + // Process indefinite-length string chunks. b := []byte{} for !d.foundBreak() { _, _, val = d.getHead() - b = append(b, d.data[d.off:d.off+int(val)]...) - d.off += int(val) + b = append(b, d.data[d.off:d.off+int(val)]...) //nolint:gosec + d.off += int(val) //nolint:gosec } return b, true } @@ -2300,19 +2310,19 @@ func (d *decoder) applyByteStringTextConversion( func (d *decoder) parseTextString() ([]byte, error) { _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() if !indefiniteLength { - b := d.data[d.off : d.off+int(val)] - d.off += int(val) + b := d.data[d.off : d.off+int(val)] //nolint:gosec + d.off += int(val) //nolint:gosec if d.dm.utf8 == UTF8RejectInvalid && !utf8.Valid(b) { return nil, &SemanticError{"cbor: invalid UTF-8 string"} } return b, nil } - // Process indefinite length string chunks. + // Process indefinite-length string chunks. 
b := []byte{} for !d.foundBreak() { _, _, val = d.getHead() - x := d.data[d.off : d.off+int(val)] - d.off += int(val) + x := d.data[d.off : d.off+int(val)] //nolint:gosec + d.off += int(val) //nolint:gosec if d.dm.utf8 == UTF8RejectInvalid && !utf8.Valid(x) { for !d.foundBreak() { d.skip() // Skip remaining chunk on error @@ -2327,7 +2337,7 @@ func (d *decoder) parseTextString() ([]byte, error) { func (d *decoder) parseArray() ([]any, error) { _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() hasSize := !indefiniteLength - count := int(val) + count := int(val) //nolint:gosec if !hasSize { count = d.numOfItemsUntilBreak() // peek ahead to get array size to preallocate slice for better performance } @@ -2349,7 +2359,7 @@ func (d *decoder) parseArray() ([]any, error) { func (d *decoder) parseArrayToSlice(v reflect.Value, tInfo *typeInfo) error { _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() hasSize := !indefiniteLength - count := int(val) + count := int(val) //nolint:gosec if !hasSize { count = d.numOfItemsUntilBreak() // peek ahead to get array size to preallocate slice for better performance } @@ -2371,7 +2381,7 @@ func (d *decoder) parseArrayToSlice(v reflect.Value, tInfo *typeInfo) error { func (d *decoder) parseArrayToArray(v reflect.Value, tInfo *typeInfo) error { _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() hasSize := !indefiniteLength - count := int(val) + count := int(val) //nolint:gosec gi := 0 vLen := v.Len() var err error @@ -2400,7 +2410,7 @@ func (d *decoder) parseArrayToArray(v reflect.Value, tInfo *typeInfo) error { func (d *decoder) parseMap() (any, error) { _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() hasSize := !indefiniteLength - count := int(val) + count := int(val) //nolint:gosec m := make(map[any]any) var k, e any var err, lastErr error @@ -2465,7 +2475,7 @@ func (d *decoder) parseMap() (any, error) { func (d *decoder) parseMapToMap(v reflect.Value, tInfo 
*typeInfo) error { //nolint:gocyclo _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() hasSize := !indefiniteLength - count := int(val) + count := int(val) //nolint:gosec if v.IsNil() { mapsize := count if !hasSize { @@ -2566,9 +2576,9 @@ func (d *decoder) parseMapToMap(v reflect.Value, tInfo *typeInfo) error { //noli } func (d *decoder) parseArrayToStruct(v reflect.Value, tInfo *typeInfo) error { - structType := getDecodingStructType(tInfo.nonPtrType) - if structType.err != nil { - return structType.err + structType, structTypeErr := getDecodingStructType(tInfo.nonPtrType) + if structTypeErr != nil { + return structTypeErr } if !structType.toArray { @@ -2584,7 +2594,7 @@ func (d *decoder) parseArrayToStruct(v reflect.Value, tInfo *typeInfo) error { start := d.off _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() hasSize := !indefiniteLength - count := int(val) + count := int(val) //nolint:gosec if !hasSize { count = d.numOfItemsUntilBreak() // peek ahead to get array size } @@ -2637,11 +2647,72 @@ func (d *decoder) parseArrayToStruct(v reflect.Value, tInfo *typeInfo) error { return err } -// parseMapToStruct needs to be fast so gocyclo can be ignored for now. +// skipMapEntriesFromIndex skips remaining map entries starting from index i. +func (d *decoder) skipMapEntriesFromIndex(i, count int, hasSize bool) { + for ; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + d.skip() + d.skip() + } +} + +// skipMapForDupKey skips the current map value and all remaining map entries, +// then returns a DupMapKeyError for the given key at map index i. +func (d *decoder) skipMapForDupKey(dupKey any, i, count int, hasSize bool) error { + // Skip the value of the duplicate key. + d.skip() + // Skip all remaining map entries. 
+ d.skipMapEntriesFromIndex(i+1, count, hasSize) + return &DupMapKeyError{dupKey, i} +} + +// skipMapForUnknownField skips the current map value and all remaining map entries, +// then returns a UnknownFieldError for the given key at map index i. +func (d *decoder) skipMapForUnknownField(i, count int, hasSize bool) error { + // Skip the value of the unknown key. + d.skip() + // Skip all remaining map entries. + d.skipMapEntriesFromIndex(i+1, count, hasSize) + return &UnknownFieldError{i} +} + +// decodeToStructField decodes the next CBOR value into the struct field f in v. +// If the field cannot be resolved, the CBOR value is skipped. +func (d *decoder) decodeToStructField(v reflect.Value, f *decodingField, tInfo *typeInfo) error { + var fv reflect.Value + + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + var err error + fv, err = getFieldValue(v, f.idx, func(v reflect.Value) (reflect.Value, error) { + // Return a new value for embedded field null pointer to point to, or return error. + if !v.CanSet() { + return reflect.Value{}, errors.New("cbor: cannot set embedded pointer to unexported struct: " + v.Type().String()) + } + v.Set(reflect.New(v.Type().Elem())) + return v, nil + }) + if !fv.IsValid() { + d.skip() + return err + } + } + + err := d.parseToValue(fv, f.typInfo) + if err != nil { + if typeError, ok := err.(*UnmarshalTypeError); ok { + typeError.StructFieldName = tInfo.nonPtrType.String() + "." 
+ f.name + } + return err + } + + return nil +} + func (d *decoder) parseMapToStruct(v reflect.Value, tInfo *typeInfo) error { //nolint:gocyclo - structType := getDecodingStructType(tInfo.nonPtrType) - if structType.err != nil { - return structType.err + structType, structTypeErr := getDecodingStructType(tInfo.nonPtrType) + if structTypeErr != nil { + return structTypeErr } if structType.toArray { @@ -2654,14 +2725,12 @@ func (d *decoder) parseMapToStruct(v reflect.Value, tInfo *typeInfo) error { //n } } - var err, lastErr error - // Get CBOR map size _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() hasSize := !indefiniteLength - count := int(val) + count := int(val) //nolint:gosec - // Keeps track of matched struct fields + // Keep track of matched struct fields to detect duplicate map keys. var foundFldIdx []bool { const maxStackFields = 128 @@ -2675,99 +2744,80 @@ func (d *decoder) parseMapToStruct(v reflect.Value, tInfo *typeInfo) error { //n } } - // Keeps track of CBOR map keys to detect duplicate map key - keyCount := 0 - var mapKeys map[any]struct{} - - errOnUnknownField := (d.dm.extraReturnErrors & ExtraDecErrorUnknownField) > 0 + // Keep track of unmatched CBOR map keys to detect duplicate map keys. + var unmatchedMapKeys map[any]struct{} -MapEntryLoop: - for j := 0; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { - var f *field + var err error - // If duplicate field detection is enabled and the key at index j did not match any - // field, k will hold the map key. - var k any + caseInsensitive := d.dm.fieldNameMatching == FieldNameMatchingPreferCaseSensitive + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { t := d.nextCBORType() - if t == cborTypeTextString || (t == cborTypeByteString && d.dm.fieldNameByteString == FieldNameByteStringAllowed) { + + // Reclassify disallowed byte string keys so they fall to the default case. + // keyType is only used for branch control. 
+ keyType := t + if t == cborTypeByteString && d.dm.fieldNameByteString != FieldNameByteStringAllowed { + keyType = 0xff + } + + switch keyType { + case cborTypeTextString, cborTypeByteString: var keyBytes []byte if t == cborTypeTextString { - keyBytes, lastErr = d.parseTextString() - if lastErr != nil { + var parseErr error + keyBytes, parseErr = d.parseTextString() + if parseErr != nil { if err == nil { - err = lastErr + err = parseErr } - d.skip() // skip value + d.skip() // Skip value continue } } else { // cborTypeByteString keyBytes, _ = d.parseByteString() } - // Check for exact match on field name. - if i, ok := structType.fieldIndicesByName[string(keyBytes)]; ok { - fld := structType.fields[i] + // Find matching struct field (exact match, then case-insensitive fallback). + if fldIdx, ok := findStructFieldByKey(structType, keyBytes, caseInsensitive); ok { + fld := structType.fields[fldIdx] - if !foundFldIdx[i] { - f = fld - foundFldIdx[i] = true - } else if d.dm.dupMapKey == DupMapKeyEnforcedAPF { - err = &DupMapKeyError{fld.name, j} - d.skip() // skip value - j++ - // skip the rest of the map - for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { - d.skip() - d.skip() + switch checkDupField(d.dm, foundFldIdx, fldIdx) { + case mapActionParseValueAndContinue: + if fieldErr := d.decodeToStructField(v, fld, tInfo); fieldErr != nil && err == nil { + err = fieldErr } - return err - } else { - // discard repeated match + continue + case mapActionSkipAllAndReturnError: + return d.skipMapForDupKey(string(keyBytes), i, count, hasSize) + case mapActionSkipValueAndContinue: d.skip() - continue MapEntryLoop + continue } } - // Find field with case-insensitive match - if f == nil && d.dm.fieldNameMatching == FieldNameMatchingPreferCaseSensitive { - keyLen := len(keyBytes) - keyString := string(keyBytes) - for i := 0; i < len(structType.fields); i++ { - fld := structType.fields[i] - if len(fld.name) == keyLen && strings.EqualFold(fld.name, keyString) { - 
if !foundFldIdx[i] { - f = fld - foundFldIdx[i] = true - } else if d.dm.dupMapKey == DupMapKeyEnforcedAPF { - err = &DupMapKeyError{keyString, j} - d.skip() // skip value - j++ - // skip the rest of the map - for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { - d.skip() - d.skip() - } - return err - } else { - // discard repeated match - d.skip() - continue MapEntryLoop - } - break - } - } + // No matching struct field found. + if unmatchedErr := handleUnmatchedMapKey(d, string(keyBytes), i, count, hasSize, &unmatchedMapKeys); unmatchedErr != nil { + return unmatchedErr } - if d.dm.dupMapKey == DupMapKeyEnforcedAPF && f == nil { - k = string(keyBytes) - } - } else if t <= cborTypeNegativeInt { // uint/int + case cborTypePositiveInt, cborTypeNegativeInt: var nameAsInt int64 if t == cborTypePositiveInt { _, _, val := d.getHead() - nameAsInt = int64(val) + if val > math.MaxInt64 { + if err == nil { + err = &UnmarshalTypeError{ + CBORType: t.String(), + GoType: reflect.TypeOf(int64(0)).String(), + errorMsg: strconv.FormatUint(val, 10) + " overflows Go's int64", + } + } + d.skip() // skip value + continue + } + nameAsInt = int64(val) //nolint:gosec } else { _, _, val := d.getHead() if val > math.MaxInt64 { @@ -2781,39 +2831,35 @@ MapEntryLoop: d.skip() // skip value continue } - nameAsInt = int64(-1) ^ int64(val) - } - - // Find field - for i := 0; i < len(structType.fields); i++ { - fld := structType.fields[i] - if fld.keyAsInt && fld.nameAsInt == nameAsInt { - if !foundFldIdx[i] { - f = fld - foundFldIdx[i] = true - } else if d.dm.dupMapKey == DupMapKeyEnforcedAPF { - err = &DupMapKeyError{nameAsInt, j} - d.skip() // skip value - j++ - // skip the rest of the map - for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { - d.skip() - d.skip() - } - return err - } else { - // discard repeated match - d.skip() - continue MapEntryLoop + nameAsInt = int64(-1) ^ int64(val) //nolint:gosec + } + + // Find field by integer key + if fldIdx, ok := 
structType.fieldIndicesByIntKey[nameAsInt]; ok { + fld := structType.fields[fldIdx] + + switch checkDupField(d.dm, foundFldIdx, fldIdx) { + case mapActionParseValueAndContinue: + if fieldErr := d.decodeToStructField(v, fld, tInfo); fieldErr != nil && err == nil { + err = fieldErr } - break + continue + case mapActionSkipAllAndReturnError: + return d.skipMapForDupKey(nameAsInt, i, count, hasSize) + case mapActionSkipValueAndContinue: + d.skip() + continue } } - if d.dm.dupMapKey == DupMapKeyEnforcedAPF && f == nil { - k = nameAsInt + // No matching struct field found. + if unmatchedErr := handleUnmatchedMapKey(d, nameAsInt, i, count, hasSize, &unmatchedMapKeys); unmatchedErr != nil { + return unmatchedErr } - } else { + + default: + // CBOR map keys that can't be matched to any struct field. + if err == nil { err = &UnmarshalTypeError{ CBORType: t.String(), @@ -2821,97 +2867,31 @@ MapEntryLoop: errorMsg: "map key is of type " + t.String() + " and cannot be used to match struct field name", } } + + var otherKey any if d.dm.dupMapKey == DupMapKeyEnforcedAPF { // parse key - k, lastErr = d.parse(true) - if lastErr != nil { + var parseErr error + otherKey, parseErr = d.parse(true) + if parseErr != nil { d.skip() // skip value continue } // Detect if CBOR map key can be used as Go map key. - if !isHashableValue(reflect.ValueOf(k)) { + if !isHashableValue(reflect.ValueOf(otherKey)) { d.skip() // skip value continue } } else { d.skip() // skip key } - } - - if f == nil { - if errOnUnknownField { - err = &UnknownFieldError{j} - d.skip() // Skip value - j++ - // skip the rest of the map - for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { - d.skip() - d.skip() - } - return err - } - - // Two map keys that match the same struct field are immediately considered - // duplicates. This check detects duplicates between two map keys that do - // not match a struct field. If unknown field errors are enabled, then this - // check is never reached. 
- if d.dm.dupMapKey == DupMapKeyEnforcedAPF { - if mapKeys == nil { - mapKeys = make(map[any]struct{}, 1) - } - mapKeys[k] = struct{}{} - newKeyCount := len(mapKeys) - if newKeyCount == keyCount { - err = &DupMapKeyError{k, j} - d.skip() // skip value - j++ - // skip the rest of the map - for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { - d.skip() - d.skip() - } - return err - } - keyCount = newKeyCount - } - - d.skip() // Skip value - continue - } - - // Get field value by index - var fv reflect.Value - if len(f.idx) == 1 { - fv = v.Field(f.idx[0]) - } else { - fv, lastErr = getFieldValue(v, f.idx, func(v reflect.Value) (reflect.Value, error) { - // Return a new value for embedded field null pointer to point to, or return error. - if !v.CanSet() { - return reflect.Value{}, errors.New("cbor: cannot set embedded pointer to unexported struct: " + v.Type().String()) - } - v.Set(reflect.New(v.Type().Elem())) - return v, nil - }) - if lastErr != nil && err == nil { - err = lastErr - } - if !fv.IsValid() { - d.skip() - continue - } - } - if lastErr = d.parseToValue(fv, f.typInfo); lastErr != nil { - if err == nil { - if typeError, ok := lastErr.(*UnmarshalTypeError); ok { - typeError.StructFieldName = tInfo.nonPtrType.String() + "." 
+ f.name - err = typeError - } else { - err = lastErr - } + if unmatchedErr := handleUnmatchedMapKey(d, otherKey, i, count, hasSize, &unmatchedMapKeys); unmatchedErr != nil { + return unmatchedErr } } } + return err } @@ -2958,15 +2938,15 @@ func (d *decoder) skip() { switch t { case cborTypeByteString, cborTypeTextString: - d.off += int(val) + d.off += int(val) //nolint:gosec case cborTypeArray: - for i := 0; i < int(val); i++ { + for i := 0; i < int(val); i++ { //nolint:gosec d.skip() } case cborTypeMap: - for i := 0; i < int(val)*2; i++ { + for i := 0; i < int(val)*2; i++ { //nolint:gosec d.skip() } diff --git a/vendor/github.com/fxamacker/cbor/v2/decode_map_utils.go b/vendor/github.com/fxamacker/cbor/v2/decode_map_utils.go new file mode 100644 index 000000000..3c8c423ad --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/decode_map_utils.go @@ -0,0 +1,98 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import "strings" + +// mapAction represents the next action during decoding a CBOR map to a Go struct. +type mapAction int + +const ( + mapActionParseValueAndContinue mapAction = iota // The caller should process the map value. + mapActionSkipValueAndContinue // The caller should skip the map value. + mapActionSkipAllAndReturnError // The caller should skip the rest of the map and return an error. +) + +// checkDupField checks if a struct field at index i has already been matched and returns the next action. +// If not matched, it marks the field as matched and returns mapActionParseValueAndContinue. +// If matched and DupMapKeyEnforcedAPF is specified in the given dm, it returns mapActionSkipAllAndReturnError. +// If matched and DupMapKeyEnforcedAPF is not specified in the given dm, it returns mapActionSkipValueAndContinue. 
+func checkDupField(dm *decMode, foundFldIdx []bool, i int) mapAction { + if !foundFldIdx[i] { + foundFldIdx[i] = true + return mapActionParseValueAndContinue + } + if dm.dupMapKey == DupMapKeyEnforcedAPF { + return mapActionSkipAllAndReturnError + } + return mapActionSkipValueAndContinue +} + +// findStructFieldByKey finds a struct field matching keyBytes by name. +// It tries an exact match first. If no exact match is found and +// caseInsensitive is true, it falls back to a case-insensitive search. +// findStructFieldByKey returns the field index and true, or -1 and false. +func findStructFieldByKey( + structType *decodingStructType, + keyBytes []byte, + caseInsensitive bool, +) (int, bool) { + if fldIdx, ok := structType.fieldIndicesByName[string(keyBytes)]; ok { + return fldIdx, true + } + if caseInsensitive { + return findFieldCaseInsensitive(structType.fields, string(keyBytes)) + } + return -1, false +} + +// findFieldCaseInsensitive returns the index of the first field whose name +// case-insensitively matches key, or -1 and false if no field matches. +func findFieldCaseInsensitive(flds decodingFields, key string) (int, bool) { + keyLen := len(key) + for i, f := range flds { + if f.keyAsInt { + continue + } + if len(f.name) == keyLen && strings.EqualFold(f.name, key) { + return i, true + } + } + return -1, false +} + +// handleUnmatchedMapKey handles a map entry whose key does not match any struct +// field. It can return UnknownFieldError or DupMapKeyError. +// handleUnmatchedMapKey consumes the CBOR value, so the caller doesn't need to skip any values. +// If an error is returned, the caller should abort parsing the map and return the error. +// If no error is returned, the caller should continue to process the next map pair. 
+func handleUnmatchedMapKey( + d *decoder, + key any, + i int, + count int, + hasSize bool, + // *map[any]struct{} is used here because we use lazy initialization for uks + uks *map[any]struct{}, //nolint:gocritic +) error { + errOnUnknownField := (d.dm.extraReturnErrors & ExtraDecErrorUnknownField) > 0 + + if errOnUnknownField { + return d.skipMapForUnknownField(i, count, hasSize) + } + + if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + if *uks == nil { + *uks = make(map[any]struct{}) + } + if _, dup := (*uks)[key]; dup { + return d.skipMapForDupKey(key, i, count, hasSize) + } + (*uks)[key] = struct{}{} + } + + // Skip value. + d.skip() + return nil +} diff --git a/vendor/github.com/fxamacker/cbor/v2/diagnose.go b/vendor/github.com/fxamacker/cbor/v2/diagnose.go index 44afb8660..42a67ad11 100644 --- a/vendor/github.com/fxamacker/cbor/v2/diagnose.go +++ b/vendor/github.com/fxamacker/cbor/v2/diagnose.go @@ -51,11 +51,8 @@ const ( maxByteStringEncoding ) -func (bse ByteStringEncoding) valid() error { - if bse >= maxByteStringEncoding { - return errors.New("cbor: invalid ByteStringEncoding " + strconv.Itoa(int(bse))) - } - return nil +func (bse ByteStringEncoding) valid() bool { + return bse < maxByteStringEncoding } // DiagOptions specifies Diag options. 
@@ -104,8 +101,8 @@ func (opts DiagOptions) DiagMode() (DiagMode, error) { } func (opts DiagOptions) diagMode() (*diagMode, error) { - if err := opts.ByteStringEncoding.valid(); err != nil { - return nil, err + if !opts.ByteStringEncoding.valid() { + return nil, errors.New("cbor: invalid ByteStringEncoding " + strconv.Itoa(int(opts.ByteStringEncoding))) } decMode, err := DecOptions{ @@ -360,7 +357,7 @@ func (di *diagnose) item() error { //nolint:gocyclo case cborTypeArray: _, _, val := di.d.getHead() - count := int(val) + count := int(val) //nolint:gosec di.w.WriteByte('[') for i := 0; i < count; i++ { @@ -376,7 +373,7 @@ func (di *diagnose) item() error { //nolint:gocyclo case cborTypeMap: _, _, val := di.d.getHead() - count := int(val) + count := int(val) //nolint:gosec di.w.WriteByte('{') for i := 0; i < count; i++ { @@ -477,8 +474,8 @@ func (di *diagnose) item() error { //nolint:gocyclo func (di *diagnose) writeU16(val rune) { di.w.WriteString("\\u") var in [2]byte - in[0] = byte(val >> 8) - in[1] = byte(val) + in[0] = byte(val >> 8) //nolint:gosec + in[1] = byte(val) //nolint:gosec sz := hex.EncodedLen(len(in)) di.w.Grow(sz) dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz] @@ -608,7 +605,7 @@ func (di *diagnose) encodeTextString(val string, quote byte) error { c, size := utf8.DecodeRuneInString(val[i:]) switch { - case c == utf8.RuneError: + case c == utf8.RuneError && size == 1: return &SemanticError{"cbor: invalid UTF-8 string"} case c < utf16SurrSelf: @@ -631,7 +628,7 @@ func (di *diagnose) encodeFloat(ai byte, val uint64) error { f64 := float64(0) switch ai { case additionalInformationAsFloat16: - f16 := float16.Frombits(uint16(val)) + f16 := float16.Frombits(uint16(val)) //nolint:gosec switch { case f16.IsNaN(): di.w.WriteString("NaN") @@ -647,7 +644,7 @@ func (di *diagnose) encodeFloat(ai byte, val uint64) error { } case additionalInformationAsFloat32: - f32 := math.Float32frombits(uint32(val)) + f32 := math.Float32frombits(uint32(val)) //nolint:gosec 
switch { case f32 != f32: di.w.WriteString("NaN") diff --git a/vendor/github.com/fxamacker/cbor/v2/encode.go b/vendor/github.com/fxamacker/cbor/v2/encode.go index c550617c3..e65a29d8a 100644 --- a/vendor/github.com/fxamacker/cbor/v2/encode.go +++ b/vendor/github.com/fxamacker/cbor/v2/encode.go @@ -30,7 +30,7 @@ import ( // If value implements the Marshaler interface, Marshal calls its // MarshalCBOR method. // -// If value implements encoding.BinaryMarshaler, Marhsal calls its +// If value implements encoding.BinaryMarshaler, Marshal calls its // MarshalBinary method and encode it as CBOR byte string. // // Boolean values encode as CBOR booleans (type 7). @@ -343,7 +343,7 @@ const ( // non-UTC timezone then a "localtime - UTC" numeric offset will be included as specified in RFC3339. // NOTE: User applications can avoid including the RFC3339 numeric offset by: // - providing a time.Time value set to UTC, or - // - using the TimeUnix, TimeUnixMicro, or TimeUnixDynamic option instead of TimeRFC3339. + // - using the TimeUnix, TimeUnixMicro, TimeUnixDynamic, or TimeRFC3339NanoUTC option. TimeRFC3339 // TimeRFC3339Nano causes time.Time to encode to a CBOR time (tag 0) with a text string content @@ -351,9 +351,13 @@ const ( // non-UTC timezone then a "localtime - UTC" numeric offset will be included as specified in RFC3339. // NOTE: User applications can avoid including the RFC3339 numeric offset by: // - providing a time.Time value set to UTC, or - // - using the TimeUnix, TimeUnixMicro, or TimeUnixDynamic option instead of TimeRFC3339Nano. + // - using the TimeUnix, TimeUnixMicro, TimeUnixDynamic, or TimeRFC3339NanoUTC option. TimeRFC3339Nano + // TimeRFC3339NanoUTC causes time.Time to encode to a CBOR time (tag 0) with a text string content + // representing UTC time using nanosecond precision in RFC3339 format. + TimeRFC3339NanoUTC + maxTimeMode ) @@ -436,7 +440,7 @@ const ( // FieldNameToTextString encodes struct fields to CBOR text string (major type 3). 
FieldNameToTextString FieldNameMode = iota - // FieldNameToTextString encodes struct fields to CBOR byte string (major type 2). + // FieldNameToByteString encodes struct fields to CBOR byte string (major type 2). FieldNameToByteString maxFieldNameMode @@ -567,7 +571,7 @@ type EncOptions struct { // RFC3339 format gets tag number 0, and numeric epoch time tag number 1. TimeTag EncTagMode - // IndefLength specifies whether to allow indefinite length CBOR items. + // IndefLength specifies whether to allow indefinite-length CBOR items. IndefLength IndefLengthMode // NilContainers specifies how to encode nil slices and maps. @@ -1132,10 +1136,11 @@ func encodeFloat(e *bytes.Buffer, em *encMode, v reflect.Value) error { if fopt == ShortestFloat16 { var f16 float16.Float16 p := float16.PrecisionFromfloat32(f32) - if p == float16.PrecisionExact { + switch p { + case float16.PrecisionExact: // Roundtrip float32->float16->float32 test isn't needed. f16 = float16.Fromfloat32(f32) - } else if p == float16.PrecisionUnknown { + case float16.PrecisionUnknown: // Try roundtrip float32->float16->float32 to determine if float32 can fit into float16. 
f16 = float16.Fromfloat32(f32) if f16.Float32() == f32 { @@ -1293,10 +1298,10 @@ func encodeByteString(e *bytes.Buffer, em *encMode, v reflect.Value) error { if slen == 0 { return e.WriteByte(byte(cborTypeByteString)) } - encodeHead(e, byte(cborTypeByteString), uint64(slen)) + encodeHead(e, byte(cborTypeByteString), uint64(slen)) //nolint:gosec if vk == reflect.Array { for i := 0; i < slen; i++ { - e.WriteByte(byte(v.Index(i).Uint())) + e.WriteByte(byte(v.Index(i).Uint())) //nolint:gosec } return nil } @@ -1333,7 +1338,7 @@ func (ae arrayEncodeFunc) encode(e *bytes.Buffer, em *encMode, v reflect.Value) if alen == 0 { return e.WriteByte(byte(cborTypeArray)) } - encodeHead(e, byte(cborTypeArray), uint64(alen)) + encodeHead(e, byte(cborTypeArray), uint64(alen)) //nolint:gosec for i := 0; i < alen; i++ { if err := ae.f(e, em, v.Index(i)); err != nil { return err @@ -1364,7 +1369,7 @@ func (me mapEncodeFunc) encode(e *bytes.Buffer, em *encMode, v reflect.Value) er return e.WriteByte(byte(cborTypeMap)) } - encodeHead(e, byte(cborTypeMap), uint64(mlen)) + encodeHead(e, byte(cborTypeMap), uint64(mlen)) //nolint:gosec if em.sort == SortNone || em.sort == SortFastShuffle || mlen <= 1 { return me.e(e, em, v, nil) } @@ -1427,7 +1432,7 @@ func (x *bytewiseKeyValueSorter) Swap(i, j int) { func (x *bytewiseKeyValueSorter) Less(i, j int) bool { kvi, kvj := x.kvs[i], x.kvs[j] - return bytes.Compare(x.data[kvi.offset:kvi.valueOffset], x.data[kvj.offset:kvj.valueOffset]) <= 0 + return bytes.Compare(x.data[kvi.offset:kvi.valueOffset], x.data[kvj.offset:kvj.valueOffset]) < 0 } type lengthFirstKeyValueSorter struct { @@ -1448,7 +1453,7 @@ func (x *lengthFirstKeyValueSorter) Less(i, j int) bool { if keyLengthDifference := (kvi.valueOffset - kvi.offset) - (kvj.valueOffset - kvj.offset); keyLengthDifference != 0 { return keyLengthDifference < 0 } - return bytes.Compare(x.data[kvi.offset:kvi.valueOffset], x.data[kvj.offset:kvj.valueOffset]) <= 0 + return 
bytes.Compare(x.data[kvi.offset:kvi.valueOffset], x.data[kvj.offset:kvj.valueOffset]) < 0 } var keyValuePool = sync.Pool{} @@ -1535,8 +1540,8 @@ func encodeStruct(e *bytes.Buffer, em *encMode, v reflect.Value) (err error) { // Head is rewritten later if actual encoded field count is different from struct field count. encodedHeadLen := encodeHead(e, byte(cborTypeMap), uint64(len(flds))) - kvbegin := e.Len() - kvcount := 0 + kvBeginOffset := e.Len() + kvCount := 0 for offset := 0; offset < len(flds); offset++ { f := flds[(start+offset)%len(flds)] @@ -1582,10 +1587,10 @@ func encodeStruct(e *bytes.Buffer, em *encMode, v reflect.Value) (err error) { return err } - kvcount++ + kvCount++ } - if len(flds) == kvcount { + if len(flds) == kvCount { // Encoded element count in head is the same as actual element count. return nil } @@ -1593,8 +1598,8 @@ func encodeStruct(e *bytes.Buffer, em *encMode, v reflect.Value) (err error) { // Overwrite the bytes that were reserved for the head before encoding the map entries. var actualHeadLen int { - headbuf := *bytes.NewBuffer(e.Bytes()[kvbegin-encodedHeadLen : kvbegin-encodedHeadLen : kvbegin]) - actualHeadLen = encodeHead(&headbuf, byte(cborTypeMap), uint64(kvcount)) + headbuf := *bytes.NewBuffer(e.Bytes()[kvBeginOffset-encodedHeadLen : kvBeginOffset-encodedHeadLen : kvBeginOffset]) + actualHeadLen = encodeHead(&headbuf, byte(cborTypeMap), uint64(kvCount)) } if actualHeadLen == encodedHeadLen { @@ -1607,8 +1612,8 @@ func encodeStruct(e *bytes.Buffer, em *encMode, v reflect.Value) (err error) { // encoded. The encoded entries are offset to the right by the number of excess reserved // bytes. Shift the entries left to remove the gap. 
excessReservedBytes := encodedHeadLen - actualHeadLen - dst := e.Bytes()[kvbegin-excessReservedBytes : e.Len()-excessReservedBytes] - src := e.Bytes()[kvbegin:e.Len()] + dst := e.Bytes()[kvBeginOffset-excessReservedBytes : e.Len()-excessReservedBytes] + src := e.Bytes()[kvBeginOffset:e.Len()] copy(dst, src) // After shifting, the excess bytes are at the end of the output buffer and they are @@ -1633,7 +1638,7 @@ func encodeTime(e *bytes.Buffer, em *encMode, v reflect.Value) error { } if em.timeTag == EncTagRequired { tagNumber := 1 - if em.time == TimeRFC3339 || em.time == TimeRFC3339Nano { + if em.time == TimeRFC3339 || em.time == TimeRFC3339Nano || em.time == TimeRFC3339NanoUTC { tagNumber = 0 } encodeHead(e, byte(cborTypeTag), uint64(tagNumber)) @@ -1650,7 +1655,7 @@ func encodeTime(e *bytes.Buffer, em *encMode, v reflect.Value) error { case TimeUnixDynamic: t = t.UTC().Round(time.Microsecond) - secs, nsecs := t.Unix(), uint64(t.Nanosecond()) + secs, nsecs := t.Unix(), uint64(t.Nanosecond()) //nolint:gosec if nsecs == 0 { return encodeInt(e, em, reflect.ValueOf(secs)) } @@ -1661,6 +1666,10 @@ func encodeTime(e *bytes.Buffer, em *encMode, v reflect.Value) error { s := t.Format(time.RFC3339) return encodeString(e, em, reflect.ValueOf(s)) + case TimeRFC3339NanoUTC: + s := t.UTC().Format(time.RFC3339Nano) + return encodeString(e, em, reflect.ValueOf(s)) + default: // TimeRFC3339Nano s := t.Format(time.RFC3339Nano) return encodeString(e, em, reflect.ValueOf(s)) diff --git a/vendor/github.com/fxamacker/cbor/v2/simplevalue.go b/vendor/github.com/fxamacker/cbor/v2/simplevalue.go index 30f72814f..9912e855c 100644 --- a/vendor/github.com/fxamacker/cbor/v2/simplevalue.go +++ b/vendor/github.com/fxamacker/cbor/v2/simplevalue.go @@ -93,6 +93,6 @@ func (sv *SimpleValue) unmarshalCBOR(data []byte) error { // It is safe to cast val to uint8 here because // - data is already verified to be well-formed CBOR simple value and // - val is <= math.MaxUint8. 
- *sv = SimpleValue(val) + *sv = SimpleValue(val) //nolint:gosec return nil } diff --git a/vendor/github.com/fxamacker/cbor/v2/stream.go b/vendor/github.com/fxamacker/cbor/v2/stream.go index 7ac6d7d67..da4c8f210 100644 --- a/vendor/github.com/fxamacker/cbor/v2/stream.go +++ b/vendor/github.com/fxamacker/cbor/v2/stream.go @@ -171,14 +171,20 @@ func NewEncoder(w io.Writer) *Encoder { // Encode writes the CBOR encoding of v. func (enc *Encoder) Encode(v any) error { - if len(enc.indefTypes) > 0 && v != nil { - indefType := enc.indefTypes[len(enc.indefTypes)-1] - if indefType == cborTypeTextString { + if len(enc.indefTypes) > 0 { + switch enc.indefTypes[len(enc.indefTypes)-1] { + case cborTypeTextString: + if v == nil { + return errors.New("cbor: cannot encode nil for indefinite-length text string") + } k := reflect.TypeOf(v).Kind() if k != reflect.String { return errors.New("cbor: cannot encode item type " + k.String() + " for indefinite-length text string") } - } else if indefType == cborTypeByteString { + case cborTypeByteString: + if v == nil { + return errors.New("cbor: cannot encode nil for indefinite-length byte string") + } t := reflect.TypeOf(v) k := t.Kind() if (k != reflect.Array && k != reflect.Slice) || t.Elem().Kind() != reflect.Uint8 { @@ -198,35 +204,35 @@ func (enc *Encoder) Encode(v any) error { return err } -// StartIndefiniteByteString starts byte string encoding of indefinite length. +// StartIndefiniteByteString starts indefinite-length byte string encoding. // Subsequent calls of (*Encoder).Encode() encodes definite length byte strings // ("chunks") as one contiguous string until EndIndefinite is called. func (enc *Encoder) StartIndefiniteByteString() error { return enc.startIndefinite(cborTypeByteString) } -// StartIndefiniteTextString starts text string encoding of indefinite length. +// StartIndefiniteTextString starts indefinite-length text string encoding. 
// Subsequent calls of (*Encoder).Encode() encodes definite length text strings // ("chunks") as one contiguous string until EndIndefinite is called. func (enc *Encoder) StartIndefiniteTextString() error { return enc.startIndefinite(cborTypeTextString) } -// StartIndefiniteArray starts array encoding of indefinite length. +// StartIndefiniteArray starts indefinite-length array encoding. // Subsequent calls of (*Encoder).Encode() encodes elements of the array // until EndIndefinite is called. func (enc *Encoder) StartIndefiniteArray() error { return enc.startIndefinite(cborTypeArray) } -// StartIndefiniteMap starts array encoding of indefinite length. +// StartIndefiniteMap starts indefinite-length map encoding. // Subsequent calls of (*Encoder).Encode() encodes elements of the map // until EndIndefinite is called. func (enc *Encoder) StartIndefiniteMap() error { return enc.startIndefinite(cborTypeMap) } -// EndIndefinite closes last opened indefinite length value. +// EndIndefinite closes last opened indefinite-length value. 
func (enc *Encoder) EndIndefinite() error { if len(enc.indefTypes) == 0 { return errors.New("cbor: cannot encode \"break\" code outside indefinite length values") @@ -238,18 +244,22 @@ func (enc *Encoder) EndIndefinite() error { return err } -var cborIndefHeader = map[cborType][]byte{ - cborTypeByteString: {cborByteStringWithIndefiniteLengthHead}, - cborTypeTextString: {cborTextStringWithIndefiniteLengthHead}, - cborTypeArray: {cborArrayWithIndefiniteLengthHead}, - cborTypeMap: {cborMapWithIndefiniteLengthHead}, -} - func (enc *Encoder) startIndefinite(typ cborType) error { if enc.em.indefLength == IndefLengthForbidden { return &IndefiniteLengthError{typ} } - _, err := enc.w.Write(cborIndefHeader[typ]) + var head byte + switch typ { + case cborTypeByteString: + head = cborByteStringWithIndefiniteLengthHead + case cborTypeTextString: + head = cborTextStringWithIndefiniteLengthHead + case cborTypeArray: + head = cborArrayWithIndefiniteLengthHead + case cborTypeMap: + head = cborMapWithIndefiniteLengthHead + } + _, err := enc.w.Write([]byte{head}) if err == nil { enc.indefTypes = append(enc.indefTypes, typ) } @@ -262,7 +272,9 @@ type RawMessage []byte // MarshalCBOR returns m or CBOR nil if m is nil. func (m RawMessage) MarshalCBOR() ([]byte, error) { if len(m) == 0 { - return cborNil, nil + b := make([]byte, len(cborNil)) + copy(b, cborNil) + return b, nil } return m, nil } diff --git a/vendor/github.com/fxamacker/cbor/v2/structfields.go b/vendor/github.com/fxamacker/cbor/v2/structfields.go index cf0a922cd..b2d71f2e9 100644 --- a/vendor/github.com/fxamacker/cbor/v2/structfields.go +++ b/vendor/github.com/fxamacker/cbor/v2/structfields.go @@ -6,27 +6,43 @@ package cbor import ( "reflect" "sort" + "strconv" "strings" ) +// field holds shared struct field metadata returned by getFields(). 
type field struct { - name string - nameAsInt int64 // used to decoder to match field name with CBOR int + name string + nameAsInt int64 // used to match field name with CBOR int + idx []int + typ reflect.Type // used during cache building only + keyAsInt bool // used to encode/decode field name as int + tagged bool // used to choose dominant field (at the same level tagged fields dominate untagged fields) + omitEmpty bool // used to skip empty field + omitZero bool // used to skip zero field +} + +type fields []*field + +// encodingField extends field with encoding-specific data. +type encodingField struct { + field cborName []byte - cborNameByteString []byte // major type 2 name encoding iff cborName has major type 3 - idx []int - typ reflect.Type + cborNameByteString []byte // major type 2 name encoding if cborName has major type 3 ef encodeFunc ief isEmptyFunc izf isZeroFunc - typInfo *typeInfo // used to decoder to reuse type info - tagged bool // used to choose dominant field (at the same level tagged fields dominate untagged fields) - omitEmpty bool // used to skip empty field - omitZero bool // used to skip zero field - keyAsInt bool // used to encode/decode field name as int } -type fields []*field +type encodingFields []*encodingField + +// decodingField extends field with decoding-specific data. +type decodingField struct { + field + typInfo *typeInfo // used by decoder to reuse type info +} + +type decodingFields []*decodingField // indexFieldSorter sorts fields by field idx at each level, breaking ties with idx depth. type indexFieldSorter struct { @@ -48,7 +64,7 @@ func (x *indexFieldSorter) Less(i, j int) bool { return iIdx[k] < jIdx[k] } } - return len(iIdx) <= len(jIdx) + return len(iIdx) < len(jIdx) } // nameLevelAndTagFieldSorter sorts fields by field name, idx depth, and presence of tag. 
@@ -69,6 +85,10 @@ func (x *nameLevelAndTagFieldSorter) Less(i, j int) bool { if fi.name != fj.name { return fi.name < fj.name } + // Fields with the same name but different keyAsInt are in separate namespaces. + if fi.keyAsInt != fj.keyAsInt { + return fi.keyAsInt + } if len(fi.idx) != len(fj.idx) { return len(fi.idx) < len(fj.idx) } @@ -117,22 +137,37 @@ func getFields(t reflect.Type) (flds fields, structOptions string) { } } + // Normalize keyasint field names to their canonical integer string form. + // This ensures that "01", "+1", and "1" are treated as the same key + // during deduplication. + for _, f := range flds { + if f.keyAsInt { + nameAsInt, err := strconv.Atoi(f.name) + if err != nil { + continue // Leave invalid names for callers to report. + } + f.nameAsInt = int64(nameAsInt) + f.name = strconv.Itoa(nameAsInt) + } + } + sort.Sort(&nameLevelAndTagFieldSorter{flds}) // Keep visible fields. j := 0 // index of next unique field for i := 0; i < len(flds); { name := flds[i].name + keyAsInt := flds[i].keyAsInt if i == len(flds)-1 || // last field - name != flds[i+1].name || // field i has unique field name + name != flds[i+1].name || flds[i+1].keyAsInt != keyAsInt || // field i has unique (name, keyAsInt) len(flds[i].idx) < len(flds[i+1].idx) || // field i is at a less nested level than field i+1 (flds[i].tagged && !flds[i+1].tagged) { // field i is tagged while field i+1 is not flds[j] = flds[i] j++ } - // Skip fields with the same field name. - for i++; i < len(flds) && name == flds[i].name; i++ { //nolint:revive + // Skip fields with the same (name, keyAsInt). 
+ for i++; i < len(flds) && name == flds[i].name && keyAsInt == flds[i].keyAsInt; i++ { //nolint:revive } } if j != len(flds) { diff --git a/vendor/github.com/fxamacker/cbor/v2/valid.go b/vendor/github.com/fxamacker/cbor/v2/valid.go index b40793b95..850b95019 100644 --- a/vendor/github.com/fxamacker/cbor/v2/valid.go +++ b/vendor/github.com/fxamacker/cbor/v2/valid.go @@ -54,7 +54,7 @@ func (e *MaxMapPairsError) Error() string { return "cbor: exceeded max number of key-value pairs " + strconv.Itoa(e.maxMapPairs) + " for CBOR map" } -// IndefiniteLengthError indicates found disallowed indefinite length items. +// IndefiniteLengthError indicates found disallowed indefinite-length items. type IndefiniteLengthError struct { t cborType } @@ -113,7 +113,7 @@ func (d *decoder) wellformedInternal(depth int, checkBuiltinTags bool) (int, err } return d.wellformedIndefiniteString(t, depth, checkBuiltinTags) } - valInt := int(val) + valInt := int(val) //nolint:gosec if valInt < 0 { // Detect integer overflow return 0, errors.New("cbor: " + t.String() + " length " + strconv.FormatUint(val, 10) + " is too large, causing integer overflow") @@ -136,7 +136,7 @@ func (d *decoder) wellformedInternal(depth int, checkBuiltinTags bool) (int, err return d.wellformedIndefiniteArrayOrMap(t, depth, checkBuiltinTags) } - valInt := int(val) + valInt := int(val) //nolint:gosec if valInt < 0 { // Detect integer overflow return 0, errors.New("cbor: " + t.String() + " length " + strconv.FormatUint(val, 10) + " is too large, it would cause integer overflow") @@ -212,7 +212,7 @@ func (d *decoder) wellformedInternal(depth int, checkBuiltinTags bool) (int, err return depth, nil } -// wellformedIndefiniteString checks indefinite length byte/text string's well-formedness and returns max depth and error. +// wellformedIndefiniteString checks indefinite-length byte/text string's well-formedness and returns max depth and error. 
func (d *decoder) wellformedIndefiniteString(t cborType, depth int, checkBuiltinTags bool) (int, error) { var err error for { @@ -223,7 +223,7 @@ func (d *decoder) wellformedIndefiniteString(t cborType, depth int, checkBuiltin d.off++ break } - // Peek ahead to get next type and indefinite length status. + // Peek ahead to get next type and indefinite-length status. nt, ai := parseInitialByte(d.data[d.off]) if t != nt { return 0, &SyntaxError{"cbor: wrong element type " + nt.String() + " for indefinite-length " + t.String()} @@ -238,7 +238,7 @@ func (d *decoder) wellformedIndefiniteString(t cborType, depth int, checkBuiltin return depth, nil } -// wellformedIndefiniteArrayOrMap checks indefinite length array/map's well-formedness and returns max depth and error. +// wellformedIndefiniteArrayOrMap checks indefinite-length array/map's well-formedness and returns max depth and error. func (d *decoder) wellformedIndefiniteArrayOrMap(t cborType, depth int, checkBuiltinTags bool) (int, error) { var err error maxDepth := depth @@ -326,7 +326,7 @@ func (d *decoder) wellformedHead() (t cborType, ai byte, val uint64, err error) val = uint64(binary.BigEndian.Uint16(d.data[d.off : d.off+argumentSize])) d.off += argumentSize if t == cborTypePrimitives { - if err := d.acceptableFloat(float64(float16.Frombits(uint16(val)).Float32())); err != nil { + if err := d.acceptableFloat(float64(float16.Frombits(uint16(val)).Float32())); err != nil { //nolint:gosec return 0, 0, 0, err } } @@ -341,7 +341,7 @@ func (d *decoder) wellformedHead() (t cborType, ai byte, val uint64, err error) val = uint64(binary.BigEndian.Uint32(d.data[d.off : d.off+argumentSize])) d.off += argumentSize if t == cborTypePrimitives { - if err := d.acceptableFloat(float64(math.Float32frombits(uint32(val)))); err != nil { + if err := d.acceptableFloat(float64(math.Float32frombits(uint32(val)))); err != nil { //nolint:gosec return 0, 0, 0, err } } @@ -379,12 +379,12 @@ func (d *decoder) wellformedHead() (t cborType, 
ai byte, val uint64, err error) func (d *decoder) acceptableFloat(f float64) error { switch { - case d.dm.nanDec == NaNDecodeForbidden && math.IsNaN(f): + case d.dm.nan == NaNDecodeForbidden && math.IsNaN(f): return &UnacceptableDataItemError{ CBORType: cborTypePrimitives.String(), Message: "floating-point NaN", } - case d.dm.infDec == InfDecodeForbidden && math.IsInf(f, 0): + case d.dm.inf == InfDecodeForbidden && math.IsInf(f, 0): return &UnacceptableDataItemError{ CBORType: cborTypePrimitives.String(), Message: "floating-point infinity", diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c index 41a0c9ef8..4c27973bd 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c @@ -1,7 +1,7 @@ #ifndef USE_LIBSQLITE3 /****************************************************************************** ** This file is an amalgamation of many separate C source files from SQLite -** version 3.51.2. By combining all the individual C code files into this +** version 3.51.3. By combining all the individual C code files into this ** single large file, the entire code can be compiled as a single translation ** unit. This allows many compilers to do optimizations that would not be ** possible if the files were compiled separately. Performance improvements @@ -19,7 +19,7 @@ ** separate file. This file contains only code for the core SQLite library. ** ** The content in this amalgamation comes from Fossil check-in -** b270f8339eb13b504d0b2ba154ebca966b7d with changes in files: +** 737ae4a34738ffa0c3ff7f9bb18df914dd1c with changes in files: ** ** */ @@ -468,12 +468,12 @@ extern "C" { ** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite_version()] and [sqlite_source_id()]. 
*/ -#define SQLITE_VERSION "3.51.2" -#define SQLITE_VERSION_NUMBER 3051002 -#define SQLITE_SOURCE_ID "2026-01-09 17:27:48 b270f8339eb13b504d0b2ba154ebca966b7dde08e40c3ed7d559749818cb2075" +#define SQLITE_VERSION "3.51.3" +#define SQLITE_VERSION_NUMBER 3051003 +#define SQLITE_SOURCE_ID "2026-03-13 10:38:09 737ae4a34738ffa0c3ff7f9bb18df914dd1cad163f28fd6b6e114a344fe6d618" #define SQLITE_SCM_BRANCH "branch-3.51" -#define SQLITE_SCM_TAGS "release version-3.51.2" -#define SQLITE_SCM_DATETIME "2026-01-09T17:27:48.405Z" +#define SQLITE_SCM_TAGS "release version-3.51.3" +#define SQLITE_SCM_DATETIME "2026-03-13T10:38:09.694Z" /* ** CAPI3REF: Run-Time Library Version Numbers @@ -14335,6 +14335,27 @@ struct fts5_api { #endif #define SQLITE_MIN_LENGTH 30 /* Minimum value for the length limit */ +/* +** Maximum size of any single memory allocation. +** +** This is not a limit on the total amount of memory used. This is +** a limit on the size parameter to sqlite3_malloc() and sqlite3_realloc(). +** +** The upper bound is slightly less than 2GiB: 0x7ffffeff == 2,147,483,391 +** This provides a 256-byte safety margin for defense against 32-bit +** signed integer overflow bugs when computing memory allocation sizes. +** Paranoid applications might want to reduce the maximum allocation size +** further for an even larger safety margin. 0x3fffffff or 0x0fffffff +** or even smaller would be reasonable upper bounds on the size of a memory +** allocations for most applications. 
+*/ +#ifndef SQLITE_MAX_ALLOCATION_SIZE +# define SQLITE_MAX_ALLOCATION_SIZE 2147483391 +#endif +#if SQLITE_MAX_ALLOCATION_SIZE>2147483391 +# error Maximum size for SQLITE_MAX_ALLOCATION_SIZE is 2147483391 +#endif + /* ** This is the maximum number of ** @@ -21665,6 +21686,7 @@ SQLITE_PRIVATE Select *sqlite3SelectNew(Parse*,ExprList*,SrcList*,Expr*,ExprList Expr*,ExprList*,u32,Expr*); SQLITE_PRIVATE void sqlite3SelectDelete(sqlite3*, Select*); SQLITE_PRIVATE void sqlite3SelectDeleteGeneric(sqlite3*,void*); +SQLITE_PRIVATE void sqlite3SelectCheckOnClauses(Parse *pParse, Select *pSelect); SQLITE_PRIVATE Table *sqlite3SrcListLookup(Parse*, SrcList*); SQLITE_PRIVATE int sqlite3IsReadOnly(Parse*, Table*, Trigger*); SQLITE_PRIVATE void sqlite3OpenTable(Parse*, int iCur, int iDb, Table*, int); @@ -31311,27 +31333,6 @@ static void mallocWithAlarm(int n, void **pp){ *pp = p; } -/* -** Maximum size of any single memory allocation. -** -** This is not a limit on the total amount of memory used. This is -** a limit on the size parameter to sqlite3_malloc() and sqlite3_realloc(). -** -** The upper bound is slightly less than 2GiB: 0x7ffffeff == 2,147,483,391 -** This provides a 256-byte safety margin for defense against 32-bit -** signed integer overflow bugs when computing memory allocation sizes. -** Paranoid applications might want to reduce the maximum allocation size -** further for an even larger safety margin. 0x3fffffff or 0x0fffffff -** or even smaller would be reasonable upper bounds on the size of a memory -** allocations for most applications. -*/ -#ifndef SQLITE_MAX_ALLOCATION_SIZE -# define SQLITE_MAX_ALLOCATION_SIZE 2147483391 -#endif -#if SQLITE_MAX_ALLOCATION_SIZE>2147483391 -# error Maximum size for SQLITE_MAX_ALLOCATION_SIZE is 2147483391 -#endif - /* ** Allocate memory. This routine is like sqlite3_malloc() except that it ** assumes the memory subsystem has already been initialized. 
@@ -31555,8 +31556,7 @@ SQLITE_PRIVATE void *sqlite3Realloc(void *pOld, u64 nBytes){ sqlite3_free(pOld); /* IMP: R-26507-47431 */ return 0; } - if( nBytes>=0x7fffff00 ){ - /* The 0x7ffff00 limit term is explained in comments on sqlite3Malloc() */ + if( nBytes>SQLITE_MAX_ALLOCATION_SIZE ){ return 0; } nOld = sqlite3MallocSize(pOld); @@ -69011,68 +69011,82 @@ static int walCheckpoint( && (rc = walBusyLock(pWal,xBusy,pBusyArg,WAL_READ_LOCK(0),1))==SQLITE_OK ){ u32 nBackfill = pInfo->nBackfill; - pInfo->nBackfillAttempted = mxSafeFrame; SEH_INJECT_FAULT; - - /* Sync the WAL to disk */ - rc = sqlite3OsSync(pWal->pWalFd, CKPT_SYNC_FLAGS(sync_flags)); - - /* If the database may grow as a result of this checkpoint, hint - ** about the eventual size of the db file to the VFS layer. - */ - if( rc==SQLITE_OK ){ - i64 nReq = ((i64)mxPage * szPage); - i64 nSize; /* Current size of database file */ - sqlite3OsFileControl(pWal->pDbFd, SQLITE_FCNTL_CKPT_START, 0); - rc = sqlite3OsFileSize(pWal->pDbFd, &nSize); - if( rc==SQLITE_OK && nSizehdr.mxFrame*szPage)pDbFd, SQLITE_FCNTL_SIZE_HINT,&nReq); + WalIndexHdr *pLive = (WalIndexHdr*)walIndexHdr(pWal); + + /* Now that read-lock slot 0 is locked, check that the wal has not been + ** wrapped since the header was read for this checkpoint. If it was, then + ** there was no work to do anyway. In this case the + ** (pInfo->nBackfillhdr.mxFrame) test above only passed because + ** pInfo->nBackfill had already been set to 0 by the writer that wrapped + ** the wal file. It would also be dangerous to proceed, as there may be + ** fewer than pWal->hdr.mxFrame valid frames in the wal file. 
*/ + int bChg = memcmp(pLive->aSalt, pWal->hdr.aSalt, sizeof(pWal->hdr.aSalt)); + if( 0==bChg ){ + pInfo->nBackfillAttempted = mxSafeFrame; SEH_INJECT_FAULT; + + /* Sync the WAL to disk */ + rc = sqlite3OsSync(pWal->pWalFd, CKPT_SYNC_FLAGS(sync_flags)); + + /* If the database may grow as a result of this checkpoint, hint + ** about the eventual size of the db file to the VFS layer. + */ + if( rc==SQLITE_OK ){ + i64 nReq = ((i64)mxPage * szPage); + i64 nSize; /* Current size of database file */ + sqlite3OsFileControl(pWal->pDbFd, SQLITE_FCNTL_CKPT_START, 0); + rc = sqlite3OsFileSize(pWal->pDbFd, &nSize); + if( rc==SQLITE_OK && nSizehdr.mxFrame*szPage)pDbFd, SQLITE_FCNTL_SIZE_HINT, &nReq); + } } - } - - } - /* Iterate through the contents of the WAL, copying data to the db file */ - while( rc==SQLITE_OK && 0==walIteratorNext(pIter, &iDbpage, &iFrame) ){ - i64 iOffset; - assert( walFramePgno(pWal, iFrame)==iDbpage ); - SEH_INJECT_FAULT; - if( AtomicLoad(&db->u1.isInterrupted) ){ - rc = db->mallocFailed ? SQLITE_NOMEM_BKPT : SQLITE_INTERRUPT; - break; } - if( iFrame<=nBackfill || iFrame>mxSafeFrame || iDbpage>mxPage ){ - continue; - } - iOffset = walFrameOffset(iFrame, szPage) + WAL_FRAME_HDRSIZE; - /* testcase( IS_BIG_INT(iOffset) ); // requires a 4GiB WAL file */ - rc = sqlite3OsRead(pWal->pWalFd, zBuf, szPage, iOffset); - if( rc!=SQLITE_OK ) break; - iOffset = (iDbpage-1)*(i64)szPage; - testcase( IS_BIG_INT(iOffset) ); - rc = sqlite3OsWrite(pWal->pDbFd, zBuf, szPage, iOffset); - if( rc!=SQLITE_OK ) break; - } - sqlite3OsFileControl(pWal->pDbFd, SQLITE_FCNTL_CKPT_DONE, 0); - /* If work was actually accomplished... 
*/ - if( rc==SQLITE_OK ){ - if( mxSafeFrame==walIndexHdr(pWal)->mxFrame ){ - i64 szDb = pWal->hdr.nPage*(i64)szPage; - testcase( IS_BIG_INT(szDb) ); - rc = sqlite3OsTruncate(pWal->pDbFd, szDb); - if( rc==SQLITE_OK ){ - rc = sqlite3OsSync(pWal->pDbFd, CKPT_SYNC_FLAGS(sync_flags)); + /* Iterate through the contents of the WAL, copying data to the + ** db file */ + while( rc==SQLITE_OK && 0==walIteratorNext(pIter, &iDbpage, &iFrame) ){ + i64 iOffset; + assert( walFramePgno(pWal, iFrame)==iDbpage ); + SEH_INJECT_FAULT; + if( AtomicLoad(&db->u1.isInterrupted) ){ + rc = db->mallocFailed ? SQLITE_NOMEM_BKPT : SQLITE_INTERRUPT; + break; } + if( iFrame<=nBackfill || iFrame>mxSafeFrame || iDbpage>mxPage ){ + continue; + } + iOffset = walFrameOffset(iFrame, szPage) + WAL_FRAME_HDRSIZE; + /* testcase( IS_BIG_INT(iOffset) ); // requires a 4GiB WAL file */ + rc = sqlite3OsRead(pWal->pWalFd, zBuf, szPage, iOffset); + if( rc!=SQLITE_OK ) break; + iOffset = (iDbpage-1)*(i64)szPage; + testcase( IS_BIG_INT(iOffset) ); + rc = sqlite3OsWrite(pWal->pDbFd, zBuf, szPage, iOffset); + if( rc!=SQLITE_OK ) break; } + sqlite3OsFileControl(pWal->pDbFd, SQLITE_FCNTL_CKPT_DONE, 0); + + /* If work was actually accomplished... */ if( rc==SQLITE_OK ){ - AtomicStore(&pInfo->nBackfill, mxSafeFrame); SEH_INJECT_FAULT; + if( mxSafeFrame==walIndexHdr(pWal)->mxFrame ){ + i64 szDb = pWal->hdr.nPage*(i64)szPage; + testcase( IS_BIG_INT(szDb) ); + rc = sqlite3OsTruncate(pWal->pDbFd, szDb); + if( rc==SQLITE_OK ){ + rc = sqlite3OsSync(pWal->pDbFd, CKPT_SYNC_FLAGS(sync_flags)); + } + } + if( rc==SQLITE_OK ){ + AtomicStore(&pInfo->nBackfill, mxSafeFrame); SEH_INJECT_FAULT; + } } } @@ -71122,6 +71136,7 @@ SQLITE_PRIVATE int sqlite3WalCheckpoint( /* Copy data from the log to the database file. 
*/ if( rc==SQLITE_OK ){ + sqlite3FaultSim(660); if( pWal->hdr.mxFrame && walPagesize(pWal)!=nBuf ){ rc = SQLITE_CORRUPT_BKPT; }else if( eMode2!=SQLITE_CHECKPOINT_NOOP ){ @@ -77557,7 +77572,7 @@ static int accessPayload( getCellInfo(pCur); aPayload = pCur->info.pPayload; - assert( offset+amt <= pCur->info.nPayload ); + assert( (u64)offset+(u64)amt <= (u64)pCur->info.nPayload ); assert( aPayload > pPage->aData ); if( (uptr)(aPayload - pPage->aData) > (pBt->usableSize - pCur->info.nLocal) ){ @@ -86031,7 +86046,12 @@ SQLITE_PRIVATE int sqlite3VdbeMemFromBtree( ){ int rc; pMem->flags = MEM_Null; - if( sqlite3BtreeMaxRecordSize(pCur)=SQLITE_MAX_ALLOCATION_SIZE ){ + return SQLITE_NOMEM_BKPT; + } + if( (u64)amt + (u64)offset > (u64)sqlite3BtreeMaxRecordSize(pCur) ){ return SQLITE_CORRUPT_BKPT; } if( SQLITE_OK==(rc = sqlite3VdbeMemClearAndResize(pMem, amt+1)) ){ @@ -93444,7 +93464,7 @@ static int valueFromValueList( Mem sMem; /* Raw content of current row */ memset(&sMem, 0, sizeof(sMem)); sz = sqlite3BtreePayloadSize(pRhs->pCsr); - rc = sqlite3VdbeMemFromBtreeZeroOffset(pRhs->pCsr,(int)sz,&sMem); + rc = sqlite3VdbeMemFromBtreeZeroOffset(pRhs->pCsr,sz,&sMem); if( rc==SQLITE_OK ){ u8 *zBuf = (u8*)sMem.z; u32 iSerial; @@ -111186,6 +111206,14 @@ static int resolveSelectStep(Walker *pWalker, Select *p){ return WRC_Abort; } + /* If the SELECT statement contains ON clauses that were moved into + ** the WHERE clause, go through and verify that none of the terms + ** in the ON clauses reference tables to the right of the ON clause. */ + if( (p->selFlags & SF_OnToWhere) ){ + sqlite3SelectCheckOnClauses(pParse, p); + if( pParse->nErr ) return WRC_Abort; + } + /* Advance to the next term of the compound */ p = p->pPrior; @@ -154357,7 +154385,7 @@ static int selectCheckOnClausesSelect(Walker *pWalker, Select *pSelect){ ** Check all ON clauses in pSelect to verify that they do not reference ** columns to the right. 
*/ -static void selectCheckOnClauses(Parse *pParse, Select *pSelect){ +SQLITE_PRIVATE void sqlite3SelectCheckOnClauses(Parse *pParse, Select *pSelect){ Walker w; CheckOnCtx sCtx; assert( pSelect->selFlags & SF_OnToWhere ); @@ -154500,18 +154528,6 @@ SQLITE_PRIVATE int sqlite3Select( } #endif - /* If the SELECT statement contains ON clauses that were moved into - ** the WHERE clause, go through and verify that none of the terms - ** in the ON clauses reference tables to the right of the ON clause. - ** Do this now, after name resolution, but before query flattening - */ - if( p->selFlags & SF_OnToWhere ){ - selectCheckOnClauses(pParse, p); - if( pParse->nErr ){ - goto select_end; - } - } - /* If the SF_UFSrcCheck flag is set, then this function is being called ** as part of populating the temp table for an UPDATE...FROM statement. ** In this case, it is an error if the target object (pSrc->a[0]) name @@ -164678,6 +164694,15 @@ SQLITE_PRIVATE SQLITE_NOINLINE void sqlite3WhereRightJoinLoop( sqlite3ExprDup(pParse->db, pTerm->pExpr, 0)); } } + if( pLevel->iIdxCur ){ + /* pSubWhere may contain expressions that read from an index on the + ** table on the RHS of the right join. All such expressions first test + ** if the index is pointing at a NULL row, and if so, read from the + ** table cursor instead. So ensure that the index cursor really is + ** pointing at a NULL row here, so that no values are read from it during + ** the scan of the RHS of the RIGHT join below. 
*/ + sqlite3VdbeAddOp1(v, OP_NullRow, pLevel->iIdxCur); + } pFrom = &uSrc.sSrc; pFrom->nSrc = 1; pFrom->nAlloc = 1; @@ -224164,7 +224189,7 @@ static int rbuDeltaApply( /* ERROR: copy exceeds output file size */ return -1; } - if( (int)(ofst+cnt) > lenSrc ){ + if( (u64)ofst+(u64)cnt > (u64)lenSrc ){ /* ERROR: copy extends past end of input */ return -1; } @@ -252451,7 +252476,7 @@ static void fts5DoSecureDelete( int iSegid = pSeg->pSeg->iSegid; u8 *aPg = pSeg->pLeaf->p; int nPg = pSeg->pLeaf->nn; - int iPgIdx = pSeg->pLeaf->szLeaf; + int iPgIdx = pSeg->pLeaf->szLeaf; /* Offset of page footer */ u64 iDelta = 0; int iNextOff = 0; @@ -252530,7 +252555,7 @@ static void fts5DoSecureDelete( iSOP += fts5GetVarint32(&aPg[iSOP], nPos); } assert_nc( iSOP==pSeg->iLeafOffset ); - iNextOff = pSeg->iLeafOffset + pSeg->nPos; + iNextOff = iSOP + pSeg->nPos; } } @@ -260349,7 +260374,7 @@ static void fts5SourceIdFunc( ){ assert( nArg==0 ); UNUSED_PARAM2(nArg, apUnused); - sqlite3_result_text(pCtx, "fts5: 2026-01-09 17:27:48 b270f8339eb13b504d0b2ba154ebca966b7dde08e40c3ed7d559749818cb2075", -1, SQLITE_TRANSIENT); + sqlite3_result_text(pCtx, "fts5: 2026-03-13 10:38:09 737ae4a34738ffa0c3ff7f9bb18df914dd1cad163f28fd6b6e114a344fe6d618", -1, SQLITE_TRANSIENT); } /* diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h index 78ef0b7af..6871f8d6d 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h @@ -147,12 +147,12 @@ extern "C" { ** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite_version()] and [sqlite_source_id()]. 
*/ -#define SQLITE_VERSION "3.51.2" -#define SQLITE_VERSION_NUMBER 3051002 -#define SQLITE_SOURCE_ID "2026-01-09 17:27:48 b270f8339eb13b504d0b2ba154ebca966b7dde08e40c3ed7d559749818cb2075" +#define SQLITE_VERSION "3.51.3" +#define SQLITE_VERSION_NUMBER 3051003 +#define SQLITE_SOURCE_ID "2026-03-13 10:38:09 737ae4a34738ffa0c3ff7f9bb18df914dd1cad163f28fd6b6e114a344fe6d618" #define SQLITE_SCM_BRANCH "branch-3.51" -#define SQLITE_SCM_TAGS "release version-3.51.2" -#define SQLITE_SCM_DATETIME "2026-01-09T17:27:48.405Z" +#define SQLITE_SCM_TAGS "release version-3.51.3" +#define SQLITE_SCM_DATETIME "2026-03-13T10:38:09.694Z" /* ** CAPI3REF: Run-Time Library Version Numbers diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3.go b/vendor/github.com/mattn/go-sqlite3/sqlite3.go index a967cab09..dffc2bda2 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3.go @@ -1783,15 +1783,18 @@ func (d *SQLiteDriver) Open(dsn string) (driver.Conn, error) { // Close the connection. 
func (c *SQLiteConn) Close() error { + c.mu.Lock() + defer c.mu.Unlock() + if c.db == nil { + return nil + } + runtime.SetFinalizer(c, nil) rv := C.sqlite3_close_v2(c.db) if rv != C.SQLITE_OK { - return c.lastError() + return lastError(c.db) } deleteHandles(c) - c.mu.Lock() c.db = nil - c.mu.Unlock() - runtime.SetFinalizer(c, nil) return nil } @@ -1932,16 +1935,18 @@ func (s *SQLiteStmt) Close() error { return nil } s.closed = true - if !s.c.dbConnOpen() { + runtime.SetFinalizer(s, nil) + conn := s.c + stmt := s.s + s.s = nil + s.c = nil + if !conn.dbConnOpen() { return errors.New("sqlite statement with already closed database connection") } - rv := C.sqlite3_finalize(s.s) - s.s = nil + rv := C.sqlite3_finalize(stmt) if rv != C.SQLITE_OK { - return s.c.lastError() + return conn.lastError() } - s.c = nil - runtime.SetFinalizer(s, nil) return nil } @@ -1958,6 +1963,8 @@ func (s *SQLiteStmt) bind(args []driver.NamedValue) error { return s.c.lastError() } + C.sqlite3_clear_bindings(s.s) + bindIndices := make([][3]int, len(args)) prefixes := []string{":", "@", "$"} for i, v := range args { diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_serialize.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_serialize.go index 2d7fc0d7e..51dd9c8f0 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_serialize.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_serialize.go @@ -17,7 +17,6 @@ import "C" import ( "fmt" "math" - "reflect" "unsafe" ) @@ -43,14 +42,8 @@ func (c *SQLiteConn) Serialize(schema string) ([]byte, error) { return nil, fmt.Errorf("serialized database is too large (%d bytes)", sz) } - cBuf := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{ - Data: uintptr(unsafe.Pointer(ptr)), - Len: int(sz), - Cap: int(sz), - })) - res := make([]byte, int(sz)) - copy(res, cBuf) + copy(res, unsafe.Slice((*byte)(unsafe.Pointer(ptr)), int(sz))) return res, nil } @@ -67,12 +60,7 @@ func (c *SQLiteConn) Deserialize(b []byte, schema string) error { defer 
C.free(unsafe.Pointer(zSchema)) tmpBuf := (*C.uchar)(C.sqlite3_malloc64(C.sqlite3_uint64(len(b)))) - cBuf := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{ - Data: uintptr(unsafe.Pointer(tmpBuf)), - Len: len(b), - Cap: len(b), - })) - copy(cBuf, b) + copy(unsafe.Slice((*byte)(unsafe.Pointer(tmpBuf)), len(b)), b) rc := C.sqlite3_deserialize(c.db, zSchema, tmpBuf, C.sqlite3_int64(len(b)), C.sqlite3_int64(len(b)), C.SQLITE_DESERIALIZE_FREEONCLOSE) diff --git a/vendor/github.com/operator-framework/operator-registry/alpha/declcfg/model_to_declcfg.go b/vendor/github.com/operator-framework/operator-registry/alpha/declcfg/model_to_declcfg.go index fabb0d0d2..a7732581e 100644 --- a/vendor/github.com/operator-framework/operator-registry/alpha/declcfg/model_to_declcfg.go +++ b/vendor/github.com/operator-framework/operator-registry/alpha/declcfg/model_to_declcfg.go @@ -54,7 +54,7 @@ func ConvertFromModel(mpkgs model.Model) DeclarativeConfig { } func traverseModelChannels(mpkg model.Package) ([]Channel, []Bundle) { - channels := []Channel{} + channels := make([]Channel, 0, len(mpkg.Channels)) bundleMap := map[string]*Bundle{} for _, ch := range mpkg.Channels { diff --git a/vendor/github.com/operator-framework/operator-registry/alpha/model/model.go b/vendor/github.com/operator-framework/operator-registry/alpha/model/model.go index 3149ae503..1b4e8aff6 100644 --- a/vendor/github.com/operator-framework/operator-registry/alpha/model/model.go +++ b/vendor/github.com/operator-framework/operator-registry/alpha/model/model.go @@ -337,7 +337,7 @@ type Bundle struct { func (b *Bundle) VersionString() string { if len(b.Release.Pre) > 0 { - pres := []string{} + pres := make([]string, 0, len(b.Release.Pre)) for _, pre := range b.Release.Pre { pres = append(pres, pre.String()) } diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/lib/bundle/validate.go b/vendor/github.com/operator-framework/operator-registry/pkg/lib/bundle/validate.go index 4ec65d238..833fa436b 100644 
--- a/vendor/github.com/operator-framework/operator-registry/pkg/lib/bundle/validate.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/lib/bundle/validate.go @@ -233,7 +233,7 @@ func validateAnnotations(mediaType string, fileAnnotations *AnnotationMetadata) // Validate the dependencies file func validateDependencies(dependenciesFile *registry.DependenciesFile) []error { - var validationErrors []error + validationErrors := make([]error, 0, len(dependenciesFile.Dependencies)) // Validate dependencies if exists for _, d := range dependenciesFile.Dependencies { diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/registry/conversion.go b/vendor/github.com/operator-framework/operator-registry/pkg/registry/conversion.go index 62bb619f4..933ec51e1 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/registry/conversion.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/registry/conversion.go @@ -12,7 +12,7 @@ import ( ) func PackageManifestToAPIPackage(manifest *PackageManifest) *api.Package { - channels := []*api.Channel{} + channels := make([]*api.Channel, 0, len(manifest.Channels)) for _, c := range manifest.Channels { channels = append(channels, PackageChannelToAPIChannel(&c)) } diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index d12c8920a..db1f55101 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -17,6 +17,7 @@ linters: - ineffassign - misspell - modernize + - noctx - perfsprint - revive - staticcheck @@ -88,6 +89,16 @@ linters: deny: - pkg: go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal desc: Do not use cross-module internal packages. + semconv: + list-mode: lax + files: + - "!**/semconv/**" + - "!**/exporters/zipkin/**" + deny: + - pkg: go.opentelemetry.io/otel/semconv + desc: "Use go.opentelemetry.io/otel/semconv/v1.40.0 instead. 
If a newer semconv version has been released, update the depguard rule." + allow: + - go.opentelemetry.io/otel/semconv/v1.40.0 gocritic: disabled-checks: - appendAssign diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index ab7263014..20edda441 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -11,6 +11,49 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm +## [1.43.0/0.65.0/0.19.0] 2026-04-02 + +### Added + +- Add `IsRandom` and `WithRandom` on `TraceFlags`, and `IsRandom` on `SpanContext` in `go.opentelemetry.io/otel/trace` for [W3C Trace Context Level 2 Random Trace ID Flag](https://www.w3.org/TR/trace-context-2/#random-trace-id-flag) support. (#8012) +- Add service detection with `WithService` in `go.opentelemetry.io/otel/sdk/resource`. (#7642) +- Add `DefaultWithContext` and `EnvironmentWithContext` in `go.opentelemetry.io/otel/sdk/resource` to support plumbing `context.Context` through default and environment detectors. (#8051) +- Support attributes with empty value (`attribute.EMPTY`) in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#8038) +- Support attributes with empty value (`attribute.EMPTY`) in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#8038) +- Support attributes with empty value (`attribute.EMPTY`) in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#8038) +- Support attributes with empty value (`attribute.EMPTY`) in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#8038) +- Support attributes with empty value (`attribute.EMPTY`) in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#8038) +- Support attributes with empty value (`attribute.EMPTY`) in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. 
(#8038) +- Support attributes with empty value (`attribute.EMPTY`) in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest`. (#8038) +- Add support for per-series start time tracking for cumulative metrics in `go.opentelemetry.io/otel/sdk/metric`. + Set `OTEL_GO_X_PER_SERIES_START_TIMESTAMPS=true` to enable. (#8060) +- Add `WithCardinalityLimitSelector` for metric reader for configuring cardinality limits specific to the instrument kind. (#7855) + +### Changed + +- Introduce the `EMPTY` Type in `go.opentelemetry.io/otel/attribute` to reflect that an empty value is now a valid value, with `INVALID` remaining as a deprecated alias of `EMPTY`. (#8038) +- Improve slice handling in `go.opentelemetry.io/otel/attribute` to optimize short slice values with fixed-size fast paths. (#8039) +- Improve performance of span metric recording in `go.opentelemetry.io/otel/sdk/trace` by returning early if self-observability is not enabled. (#8067) +- Improve formatting of metric data diffs in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest`. (#8073) + +### Deprecated + +- Deprecate `INVALID` in `go.opentelemetry.io/otel/attribute`. Use `EMPTY` instead. (#8038) + +### Fixed + +- Return spec-compliant `TraceIdRatioBased` description. This is a breaking behavioral change, but it is necessary to + make the implementation [spec-compliant](https://opentelemetry.io/docs/specs/otel/trace/sdk/#traceidratiobased). (#8027) +- Fix a race condition in `go.opentelemetry.io/otel/sdk/metric` where the lastvalue aggregation could collect the value 0 even when no zero-value measurements were recorded. (#8056) +- Limit HTTP response body to 4 MiB in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` to mitigate excessive memory usage caused by a misconfigured or malicious server. + Responses exceeding the limit are treated as non-retryable errors. 
(#8108) +- Limit HTTP response body to 4 MiB in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` to mitigate excessive memory usage caused by a misconfigured or malicious server. + Responses exceeding the limit are treated as non-retryable errors. (#8108) +- Limit HTTP response body to 4 MiB in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` to mitigate excessive memory usage caused by a misconfigured or malicious server. + Responses exceeding the limit are treated as non-retryable errors. (#8108) +- `WithHostID` detector in `go.opentelemetry.io/otel/sdk/resource` to use full path for `kenv` command on BSD. (#8113) +- Fix missing `request.GetBody` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` to correctly handle HTTP2 GOAWAY frame. (#8096) + ## [1.42.0/0.64.0/0.18.0/0.0.16] 2026-03-06 ### Added @@ -3576,7 +3619,8 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. 
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.42.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.43.0...HEAD +[1.43.0/0.65.0/0.19.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.43.0 [1.42.0/0.64.0/0.18.0/0.0.16]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.42.0 [1.41.0/0.63.0/0.17.0/0.0.15]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.41.0 [1.40.0/0.62.0/0.16.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.40.0 diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index 9f6e6b511..42466f2d6 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -38,10 +38,14 @@ CROSSLINK = $(TOOLS)/crosslink $(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink SEMCONVKIT = $(TOOLS)/semconvkit +SEMCONVKIT_FILES := $(sort $(shell find $(TOOLS_MOD_DIR)/semconvkit -type f)) $(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit +$(TOOLS)/semconvkit: $(SEMCONVKIT_FILES) VERIFYREADMES = $(TOOLS)/verifyreadmes +VERIFYREADMES_FILES := $(sort $(shell find $(TOOLS_MOD_DIR)/verifyreadmes -type f)) $(TOOLS)/verifyreadmes: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/verifyreadmes +$(TOOLS)/verifyreadmes: $(VERIFYREADMES_FILES) GOLANGCI_LINT = $(TOOLS)/golangci-lint $(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/v2/cmd/golangci-lint diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md index 861756fd7..6aff7548c 100644 --- a/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -4,7 +4,9 @@ Create a `Version Release` issue to track the release process. 
-## Semantic Convention Generation +## Semantic Convention Upgrade + +### Semantic Convention Generation New versions of the [OpenTelemetry Semantic Conventions] mean new versions of the `semconv` package need to be generated. The `semconv-generate` make target is used for this. @@ -22,6 +24,43 @@ make semconv-generate # Uses the exported TAG. This should create a new sub-package of [`semconv`](./semconv). Ensure things look correct before submitting a pull request to include the addition. +The `CHANGELOG.md` should also be updated to reflect the new changes: + +```md +- The `go.opentelemetry.io/otel/semconv/` package. The package contains semantic conventions from the `` version of the OpenTelemetry Semantic Conventions. See the [migration documentation](./semconv//MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/`. (#PR_NUMBER) +``` + +> **Tip:** Change to the release and prior version to match the changes + +### Update semconv imports + +Once the new semconv module has been generated, update all semconv imports throughout the codebase to reference the new version: + +```go +// Before +semconv "go.opentelemetry.io/otel/semconv/v1.37.0" +"go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" + + +// After +semconv "go.opentelemetry.io/otel/semconv/v1.39.0" +"go.opentelemetry.io/otel/semconv/v1.39.0/otelconv" +``` + +Once complete, run `make` to check for any compilation or test failures. + +#### Handling attribute changes + +Some semconv releases might add new attributes or impact attributes that are currently being used. Changes could stem from a simple renaming, to more complex changes like merging attributes and property values being changed. + +One should update the code to the new attributes that supersede the impacted ones, hence sticking to the semantic conventions. However, legacy attributes might still be emitted in accordance to the `OTEL_SEMCONV_STABILITY_OPT_IN` environment variable. 
+ +For an example on how such migration might have to be tracked and performed, see issue [#7806](https://github.com/open-telemetry/opentelemetry-go/issues/7806). + +### Go contrib linter update + +Update [.golangci.yml](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/.golangci.yml) in [opentelemetry-go-contrib](https://github.com/open-telemetry/opentelemetry-go-contrib/) to mandate the new semconv version. + ## Breaking changes validation You can run `make gorelease` which runs [gorelease](https://pkg.go.dev/golang.org/x/exp/cmd/gorelease) to ensure that there are no unwanted changes made in the public API. diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go index 6cc1a1655..771dd69c5 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/encoder.go +++ b/vendor/go.opentelemetry.io/otel/attribute/encoder.go @@ -53,7 +53,7 @@ var ( _ Encoder = &defaultAttrEncoder{} // encoderIDCounter is for generating IDs for other attribute encoders. - encoderIDCounter uint64 + encoderIDCounter atomic.Uint64 defaultEncoderOnce sync.Once defaultEncoderID = NewEncoderID() @@ -64,7 +64,7 @@ var ( // once per each type of attribute encoder. Preferably in init() or in var // definition. 
func NewEncoderID() EncoderID { - return EncoderID{value: atomic.AddUint64(&encoderIDCounter, 1)} + return EncoderID{value: encoderIDCounter.Add(1)} } // DefaultEncoder returns an attribute encoder that encodes attributes in such diff --git a/vendor/go.opentelemetry.io/otel/attribute/hash.go b/vendor/go.opentelemetry.io/otel/attribute/hash.go index 6aa69aeae..b09caaa6d 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/hash.go +++ b/vendor/go.opentelemetry.io/otel/attribute/hash.go @@ -27,6 +27,7 @@ const ( int64SliceID uint64 = 3762322556277578591 // "_[]int64" (little endian) float64SliceID uint64 = 7308324551835016539 // "[]double" (little endian) stringSliceID uint64 = 7453010373645655387 // "[]string" (little endian) + emptyID uint64 = 7305809155345288421 // "__empty_" (little endian) ) // hashKVs returns a new xxHash64 hash of kvs. @@ -80,7 +81,8 @@ func hashKV(h xxhash.Hash, kv KeyValue) xxhash.Hash { for i := 0; i < rv.Len(); i++ { h = h.String(rv.Index(i).String()) } - case INVALID: + case EMPTY: + h = h.Uint64(emptyID) default: // Logging is an alternative, but using the internal logger here // causes an import cycle so it is not done. diff --git a/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go b/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go index 7f5eae877..d9f51fa2d 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go +++ b/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go @@ -11,80 +11,63 @@ import ( "reflect" ) -// BoolSliceValue converts a bool slice into an array with same elements as slice. -func BoolSliceValue(v []bool) any { - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeFor[bool]())).Elem() - reflect.Copy(cp, reflect.ValueOf(v)) - return cp.Interface() +// sliceElem is the exact set of element types stored in attribute slice values. +// Using a closed set prevents accidental instantiations for unsupported types. 
+type sliceElem interface { + bool | int64 | float64 | string } -// Int64SliceValue converts an int64 slice into an array with same elements as slice. -func Int64SliceValue(v []int64) any { - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeFor[int64]())).Elem() - reflect.Copy(cp, reflect.ValueOf(v)) - return cp.Interface() -} - -// Float64SliceValue converts a float64 slice into an array with same elements as slice. -func Float64SliceValue(v []float64) any { - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeFor[float64]())).Elem() - reflect.Copy(cp, reflect.ValueOf(v)) - return cp.Interface() -} +// SliceValue converts a slice into an array with the same elements. +func SliceValue[T sliceElem](v []T) any { + // Keep only the common tiny-slice cases out of reflection. Extending this + // much further increases code size for diminishing benefit while larger + // slices still need the generic reflective path to preserve comparability. + // This matches the short lengths that show up most often in local + // benchmarks and semantic convention examples while leaving larger, less + // predictable slices on the generic reflective path. + switch len(v) { + case 0: + return [0]T{} + case 1: + return [1]T{v[0]} + case 2: + return [2]T{v[0], v[1]} + case 3: + return [3]T{v[0], v[1], v[2]} + } -// StringSliceValue converts a string slice into an array with same elements as slice. -func StringSliceValue(v []string) any { - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeFor[string]())).Elem() - reflect.Copy(cp, reflect.ValueOf(v)) - return cp.Interface() + return sliceValueReflect(v) } -// AsBoolSlice converts a bool array into a slice into with same elements as array. -func AsBoolSlice(v any) []bool { - rv := reflect.ValueOf(v) - if rv.Type().Kind() != reflect.Array { - return nil +// AsSlice converts an array into a slice with the same elements. +func AsSlice[T sliceElem](v any) []T { + // Mirror the small fixed-array fast path used by SliceValue. 
+ switch a := v.(type) { + case [0]T: + return []T{} + case [1]T: + return []T{a[0]} + case [2]T: + return []T{a[0], a[1]} + case [3]T: + return []T{a[0], a[1], a[2]} } - cpy := make([]bool, rv.Len()) - if len(cpy) > 0 { - _ = reflect.Copy(reflect.ValueOf(cpy), rv) - } - return cpy -} -// AsInt64Slice converts an int64 array into a slice into with same elements as array. -func AsInt64Slice(v any) []int64 { - rv := reflect.ValueOf(v) - if rv.Type().Kind() != reflect.Array { - return nil - } - cpy := make([]int64, rv.Len()) - if len(cpy) > 0 { - _ = reflect.Copy(reflect.ValueOf(cpy), rv) - } - return cpy + return asSliceReflect[T](v) } -// AsFloat64Slice converts a float64 array into a slice into with same elements as array. -func AsFloat64Slice(v any) []float64 { - rv := reflect.ValueOf(v) - if rv.Type().Kind() != reflect.Array { - return nil - } - cpy := make([]float64, rv.Len()) - if len(cpy) > 0 { - _ = reflect.Copy(reflect.ValueOf(cpy), rv) - } - return cpy +func sliceValueReflect[T sliceElem](v []T) any { + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeFor[T]())).Elem() + reflect.Copy(cp, reflect.ValueOf(v)) + return cp.Interface() } -// AsStringSlice converts a string array into a slice into with same elements as array. -func AsStringSlice(v any) []string { +func asSliceReflect[T sliceElem](v any) []T { rv := reflect.ValueOf(v) - if rv.Type().Kind() != reflect.Array { + if !rv.IsValid() || rv.Kind() != reflect.Array || rv.Type().Elem() != reflect.TypeFor[T]() { return nil } - cpy := make([]string, rv.Len()) + cpy := make([]T, rv.Len()) if len(cpy) > 0 { _ = reflect.Copy(reflect.ValueOf(cpy), rv) } diff --git a/vendor/go.opentelemetry.io/otel/attribute/kv.go b/vendor/go.opentelemetry.io/otel/attribute/kv.go index 8c6928ca7..0cc368018 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/kv.go +++ b/vendor/go.opentelemetry.io/otel/attribute/kv.go @@ -15,7 +15,7 @@ type KeyValue struct { // Valid reports whether kv is a valid OpenTelemetry attribute. 
func (kv KeyValue) Valid() bool { - return kv.Key.Defined() && kv.Value.Type() != INVALID + return kv.Key.Defined() } // Bool creates a KeyValue with a BOOL Value type. diff --git a/vendor/go.opentelemetry.io/otel/attribute/type_string.go b/vendor/go.opentelemetry.io/otel/attribute/type_string.go index 24f1fa37d..6c04448d6 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/type_string.go +++ b/vendor/go.opentelemetry.io/otel/attribute/type_string.go @@ -8,7 +8,7 @@ func _() { // An "invalid array index" compiler error signifies that the constant values have changed. // Re-run the stringer command to generate them again. var x [1]struct{} - _ = x[INVALID-0] + _ = x[EMPTY-0] _ = x[BOOL-1] _ = x[INT64-2] _ = x[FLOAT64-3] @@ -19,9 +19,9 @@ func _() { _ = x[STRINGSLICE-8] } -const _Type_name = "INVALIDBOOLINT64FLOAT64STRINGBOOLSLICEINT64SLICEFLOAT64SLICESTRINGSLICE" +const _Type_name = "EMPTYBOOLINT64FLOAT64STRINGBOOLSLICEINT64SLICEFLOAT64SLICESTRINGSLICE" -var _Type_index = [...]uint8{0, 7, 11, 16, 23, 29, 38, 48, 60, 71} +var _Type_index = [...]uint8{0, 5, 9, 14, 21, 27, 36, 46, 58, 69} func (i Type) String() string { idx := int(i) - 0 diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go index 5931e7129..db04b1326 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/value.go +++ b/vendor/go.opentelemetry.io/otel/attribute/value.go @@ -6,7 +6,6 @@ package attribute // import "go.opentelemetry.io/otel/attribute" import ( "encoding/json" "fmt" - "reflect" "strconv" attribute "go.opentelemetry.io/otel/attribute/internal" @@ -18,6 +17,8 @@ import ( type Type int // nolint: revive // redefines builtin Type. // Value represents the value part in key-value pairs. +// +// Note that the zero value is a valid empty value. type Value struct { vtype Type numeric uint64 @@ -26,8 +27,8 @@ type Value struct { } const ( - // INVALID is used for a Value with no value set. 
- INVALID Type = iota + // EMPTY is used for a Value with no value set. + EMPTY Type = iota // BOOL is a boolean Type Value. BOOL // INT64 is a 64-bit signed integral Type Value. @@ -44,6 +45,10 @@ const ( FLOAT64SLICE // STRINGSLICE is a slice of strings Type Value. STRINGSLICE + // INVALID is used for a Value with no value set. + // + // Deprecated: Use EMPTY instead as an empty value is a valid value. + INVALID = EMPTY ) // BoolValue creates a BOOL Value. @@ -56,7 +61,7 @@ func BoolValue(v bool) Value { // BoolSliceValue creates a BOOLSLICE Value. func BoolSliceValue(v []bool) Value { - return Value{vtype: BOOLSLICE, slice: attribute.BoolSliceValue(v)} + return Value{vtype: BOOLSLICE, slice: attribute.SliceValue(v)} } // IntValue creates an INT64 Value. @@ -64,16 +69,30 @@ func IntValue(v int) Value { return Int64Value(int64(v)) } -// IntSliceValue creates an INTSLICE Value. +// IntSliceValue creates an INT64SLICE Value. func IntSliceValue(v []int) Value { - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeFor[int64]())) - for i, val := range v { - cp.Elem().Index(i).SetInt(int64(val)) - } - return Value{ - vtype: INT64SLICE, - slice: cp.Elem().Interface(), + val := Value{vtype: INT64SLICE} + + // Avoid the common tiny-slice cases from allocating a new slice. + switch len(v) { + case 0: + val.slice = [0]int64{} + case 1: + val.slice = [1]int64{int64(v[0])} + case 2: + val.slice = [2]int64{int64(v[0]), int64(v[1])} + case 3: + val.slice = [3]int64{int64(v[0]), int64(v[1]), int64(v[2])} + default: + // Fallback to a new slice for larger slices. + cp := make([]int64, len(v)) + for i, val := range v { + cp[i] = int64(val) + } + val.slice = attribute.SliceValue(cp) } + + return val } // Int64Value creates an INT64 Value. @@ -86,7 +105,7 @@ func Int64Value(v int64) Value { // Int64SliceValue creates an INT64SLICE Value. 
func Int64SliceValue(v []int64) Value { - return Value{vtype: INT64SLICE, slice: attribute.Int64SliceValue(v)} + return Value{vtype: INT64SLICE, slice: attribute.SliceValue(v)} } // Float64Value creates a FLOAT64 Value. @@ -99,7 +118,7 @@ func Float64Value(v float64) Value { // Float64SliceValue creates a FLOAT64SLICE Value. func Float64SliceValue(v []float64) Value { - return Value{vtype: FLOAT64SLICE, slice: attribute.Float64SliceValue(v)} + return Value{vtype: FLOAT64SLICE, slice: attribute.SliceValue(v)} } // StringValue creates a STRING Value. @@ -112,7 +131,7 @@ func StringValue(v string) Value { // StringSliceValue creates a STRINGSLICE Value. func StringSliceValue(v []string) Value { - return Value{vtype: STRINGSLICE, slice: attribute.StringSliceValue(v)} + return Value{vtype: STRINGSLICE, slice: attribute.SliceValue(v)} } // Type returns a type of the Value. @@ -136,7 +155,7 @@ func (v Value) AsBoolSlice() []bool { } func (v Value) asBoolSlice() []bool { - return attribute.AsBoolSlice(v.slice) + return attribute.AsSlice[bool](v.slice) } // AsInt64 returns the int64 value. Make sure that the Value's type is @@ -155,7 +174,7 @@ func (v Value) AsInt64Slice() []int64 { } func (v Value) asInt64Slice() []int64 { - return attribute.AsInt64Slice(v.slice) + return attribute.AsSlice[int64](v.slice) } // AsFloat64 returns the float64 value. Make sure that the Value's @@ -174,7 +193,7 @@ func (v Value) AsFloat64Slice() []float64 { } func (v Value) asFloat64Slice() []float64 { - return attribute.AsFloat64Slice(v.slice) + return attribute.AsSlice[float64](v.slice) } // AsString returns the string value. 
Make sure that the Value's type @@ -193,7 +212,7 @@ func (v Value) AsStringSlice() []string { } func (v Value) asStringSlice() []string { - return attribute.AsStringSlice(v.slice) + return attribute.AsSlice[string](v.slice) } type unknownValueType struct{} @@ -217,6 +236,8 @@ func (v Value) AsInterface() any { return v.stringly case STRINGSLICE: return v.asStringSlice() + case EMPTY: + return nil } return unknownValueType{} } @@ -252,6 +273,8 @@ func (v Value) Emit() string { return string(j) case STRING: return v.stringly + case EMPTY: + return "" default: return "unknown" } diff --git a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile index f0cc942ba..7a9b3c055 100644 --- a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile +++ b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile @@ -1,4 +1,4 @@ # This is a renovate-friendly source of Docker images. FROM python:3.13.6-slim-bullseye@sha256:e98b521460ee75bca92175c16247bdf7275637a8faaeb2bcfa19d879ae5c4b9a AS python -FROM otel/weaver:v0.21.2@sha256:2401de985c38bdb98b43918e2f43aa36b2afed4aa5669ac1c1de0a17301cd36d AS weaver +FROM otel/weaver:v0.22.1@sha256:33ae522ae4b71c1c562563c1d81f46aa0f79f088a0873199143a1f11ac30e5c9 AS weaver FROM avtodev/markdown-lint:v1@sha256:6aeedc2f49138ce7a1cd0adffc1b1c0321b841dc2102408967d9301c031949ee AS markdown diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/features.go b/vendor/go.opentelemetry.io/otel/sdk/internal/x/features.go index bfeb73e81..694b64a31 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/internal/x/features.go +++ b/vendor/go.opentelemetry.io/otel/sdk/internal/x/features.go @@ -37,3 +37,18 @@ var Observability = newFeature( return "", false }, ) + +// PerSeriesStartTimestamps is an experimental feature flag that determines if the SDK +// uses the new Start Timestamps specification. 
+// +// To enable this feature set the OTEL_GO_X_PER_SERIES_START_TIMESTAMPS environment variable +// to the case-insensitive string value of "true". +var PerSeriesStartTimestamps = newFeature( + []string{"PER_SERIES_START_TIMESTAMPS"}, + func(v string) (bool, bool) { + if strings.EqualFold(v, "true") { + return true, true + } + return false, false + }, +) diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go index 8a7bb330b..04f15fcd2 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go @@ -13,7 +13,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" ) type ( diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/config.go b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go index 0d6e213d9..a3d647d92 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/config.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go @@ -193,3 +193,11 @@ func WithContainer() Option { func WithContainerID() Option { return WithDetectors(cgroupContainerIDDetector{}) } + +// WithService adds all the Service attributes to the configured Resource. 
+func WithService() Option { + return WithDetectors( + defaultServiceInstanceIDDetector{}, + defaultServiceNameDetector{}, + ) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go index a19b39def..e977ff1c4 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go @@ -11,7 +11,7 @@ import ( "os" "regexp" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" ) type containerIDProvider func() (string, error) diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go index c49157224..bc0e5c19e 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go @@ -12,7 +12,7 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" ) const ( diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go index 023621ba7..755c08242 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go @@ -8,7 +8,7 @@ import ( "errors" "strings" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" ) type hostIDProvider func() (string, error) @@ -31,19 +31,19 @@ type hostIDReaderBSD struct { readFile fileReader } -// read attempts to read the machine-id from /etc/hostid. If not found it will -// execute `kenv -q smbios.system.uuid`. If neither location yields an id an -// error will be returned. +// read attempts to read the machine-id from /etc/hostid. +// If not found it will execute: /bin/kenv -q smbios.system.uuid. 
+// If neither location yields an id an error will be returned. func (r *hostIDReaderBSD) read() (string, error) { if result, err := r.readFile("/etc/hostid"); err == nil { return strings.TrimSpace(result), nil } - if result, err := r.execCommand("kenv", "-q", "smbios.system.uuid"); err == nil { + if result, err := r.execCommand("/bin/kenv", "-q", "smbios.system.uuid"); err == nil { return strings.TrimSpace(result), nil } - return "", errors.New("host id not found in: /etc/hostid or kenv") + return "", errors.New("host id not found in: /etc/hostid or /bin/kenv") } // hostIDReaderDarwin implements hostIDReader. diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go index 6354b3560..c95d87685 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go @@ -8,7 +8,7 @@ package resource // import "go.opentelemetry.io/otel/sdk/resource" import "os" func readFile(filename string) (string, error) { - b, err := os.ReadFile(filename) + b, err := os.ReadFile(filename) // nolint:gosec // false positive if err != nil { return "", err } diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go index 534809e21..f5682cad4 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go @@ -8,7 +8,7 @@ import ( "strings" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" ) type osDescriptionProvider func() (string, error) diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go index a1189553c..99dce64f6 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go 
@@ -11,7 +11,7 @@ import ( "path/filepath" "runtime" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" ) type ( diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go index 28e1e4f7e..f715be53e 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go @@ -232,6 +232,15 @@ func Empty() *Resource { // Default returns an instance of Resource with a default // "service.name" and OpenTelemetrySDK attributes. func Default() *Resource { + return DefaultWithContext(context.Background()) +} + +// DefaultWithContext returns an instance of Resource with a default +// "service.name" and OpenTelemetrySDK attributes. +// +// If the default resource has already been initialized, the provided ctx +// is ignored and the cached resource is returned. +func DefaultWithContext(ctx context.Context) *Resource { defaultResourceOnce.Do(func() { var err error defaultDetectors := []Detector{ @@ -243,7 +252,7 @@ func Default() *Resource { defaultDetectors = append([]Detector{defaultServiceInstanceIDDetector{}}, defaultDetectors...) } defaultResource, err = Detect( - context.Background(), + ctx, defaultDetectors..., ) if err != nil { @@ -260,8 +269,14 @@ func Default() *Resource { // Environment returns an instance of Resource with attributes // extracted from the OTEL_RESOURCE_ATTRIBUTES environment variable. func Environment() *Resource { + return EnvironmentWithContext(context.Background()) +} + +// EnvironmentWithContext returns an instance of Resource with attributes +// extracted from the OTEL_RESOURCE_ATTRIBUTES environment variable. 
+func EnvironmentWithContext(ctx context.Context) *Resource { detector := &fromEnv{} - resource, err := detector.Detect(context.Background()) + resource, err := detector.Detect(ctx) if err != nil { otel.Handle(err) } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go index 7d15cbb9c..32854b14a 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go @@ -68,7 +68,7 @@ type batchSpanProcessor struct { o BatchSpanProcessorOptions queue chan ReadOnlySpan - dropped uint32 + dropped atomic.Uint32 inst *observ.BSP @@ -123,12 +123,10 @@ func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorO otel.Handle(err) } - bsp.stopWait.Add(1) - go func() { - defer bsp.stopWait.Done() + bsp.stopWait.Go(func() { bsp.processQueue() bsp.drainQueue() - }() + }) return bsp } @@ -295,7 +293,7 @@ func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error { } if l := len(bsp.batch); l > 0 { - global.Debug("exporting spans", "count", len(bsp.batch), "total_dropped", atomic.LoadUint32(&bsp.dropped)) + global.Debug("exporting spans", "count", len(bsp.batch), "total_dropped", bsp.dropped.Load()) if bsp.inst != nil { bsp.inst.Processed(ctx, int64(l)) } @@ -423,7 +421,7 @@ func (bsp *batchSpanProcessor) enqueueDrop(ctx context.Context, sd ReadOnlySpan) case bsp.queue <- sd: return true default: - atomic.AddUint32(&bsp.dropped, 1) + bsp.dropped.Add(1) if bsp.inst != nil { bsp.inst.ProcessedQueueFull(ctx, 1) } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/batch_span_processor.go index d9cfba0b4..c31e03aa0 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/batch_span_processor.go +++ 
b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/batch_span_processor.go @@ -13,8 +13,8 @@ import ( "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/sdk" "go.opentelemetry.io/otel/sdk/internal/x" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" - "go.opentelemetry.io/otel/semconv/v1.39.0/otelconv" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" + "go.opentelemetry.io/otel/semconv/v1.40.0/otelconv" ) const ( diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/simple_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/simple_span_processor.go index 8afd05267..0e77cd953 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/simple_span_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/simple_span_processor.go @@ -13,8 +13,8 @@ import ( "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/sdk" "go.opentelemetry.io/otel/sdk/internal/x" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" - "go.opentelemetry.io/otel/semconv/v1.39.0/otelconv" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" + "go.opentelemetry.io/otel/semconv/v1.40.0/otelconv" ) var measureAttrsPool = sync.Pool{ diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/tracer.go index 13a2db296..560d316f2 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/tracer.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/tracer.go @@ -13,7 +13,7 @@ import ( "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/sdk" "go.opentelemetry.io/otel/sdk/internal/x" - "go.opentelemetry.io/otel/semconv/v1.39.0/otelconv" + "go.opentelemetry.io/otel/semconv/v1.40.0/otelconv" "go.opentelemetry.io/otel/trace" ) @@ -55,6 +55,10 @@ func NewTracer() (Tracer, error) { func (t Tracer) Enabled() bool { return t.enabled } func (t Tracer) SpanStarted(ctx context.Context, psc 
trace.SpanContext, span trace.Span) { + if !t.started.Enabled(ctx) { + return + } + key := spanStartedKey{ parent: parentStateNoParent, sampling: samplingStateDrop, @@ -89,6 +93,10 @@ func (t Tracer) SpanEnded(ctx context.Context, span trace.Span) { } func (t Tracer) spanLive(ctx context.Context, value int64, span trace.Span) { + if !t.live.Enabled(ctx) { + return + } + key := spanLiveKey{sampled: span.SpanContext().IsSampled()} opts := spanLiveOpts[key] t.live.Add(ctx, value, opts...) diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go index d2cf4ebd3..cd40d299d 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go @@ -5,6 +5,7 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "context" + "errors" "fmt" "sync" "sync/atomic" @@ -262,6 +263,7 @@ func (p *TracerProvider) ForceFlush(ctx context.Context) error { return nil } + var err error for _, sps := range spss { select { case <-ctx.Done(): @@ -269,11 +271,9 @@ func (p *TracerProvider) ForceFlush(ctx context.Context) error { default: } - if err := sps.sp.ForceFlush(ctx); err != nil { - return err - } + err = errors.Join(err, sps.sp.ForceFlush(ctx)) } - return nil + return err } // Shutdown shuts down TracerProvider. 
All registered span processors are shut down @@ -303,14 +303,7 @@ func (p *TracerProvider) Shutdown(ctx context.Context) error { sps.state.Do(func() { err = sps.sp.Shutdown(ctx) }) - if err != nil { - if retErr == nil { - retErr = err - } else { - // Poor man's list of errors - retErr = fmt.Errorf("%w; %w", retErr, err) - } - } + retErr = errors.Join(retErr, err) } p.spanProcessors.Store(&spanProcessorStates{}) return retErr diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go index 81c5060ad..845e292c2 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go @@ -69,17 +69,17 @@ type traceIDRatioSampler struct { } func (ts traceIDRatioSampler) ShouldSample(p SamplingParameters) SamplingResult { - psc := trace.SpanContextFromContext(p.ParentContext) + state := trace.SpanContextFromContext(p.ParentContext).TraceState() x := binary.BigEndian.Uint64(p.TraceID[8:16]) >> 1 if x < ts.traceIDUpperBound { return SamplingResult{ Decision: RecordAndSample, - Tracestate: psc.TraceState(), + Tracestate: state, } } return SamplingResult{ Decision: Drop, - Tracestate: psc.TraceState(), + Tracestate: state, } } @@ -94,12 +94,20 @@ func (ts traceIDRatioSampler) Description() string { // //nolint:revive // revive complains about stutter of `trace.TraceIDRatioBased` func TraceIDRatioBased(fraction float64) Sampler { + // Cannot use AlwaysSample() and NeverSample(), must return spec-compliant descriptions. + // See https://opentelemetry.io/docs/specs/otel/trace/sdk/#traceidratiobased. 
if fraction >= 1 { - return AlwaysSample() + return predeterminedSampler{ + description: "TraceIDRatioBased{1}", + decision: RecordAndSample, + } } if fraction <= 0 { - fraction = 0 + return predeterminedSampler{ + description: "TraceIDRatioBased{0}", + decision: Drop, + } } return &traceIDRatioSampler{ @@ -118,6 +126,7 @@ func (alwaysOnSampler) ShouldSample(p SamplingParameters) SamplingResult { } func (alwaysOnSampler) Description() string { + // https://opentelemetry.io/docs/specs/otel/trace/sdk/#alwayson return "AlwaysOnSampler" } @@ -139,6 +148,7 @@ func (alwaysOffSampler) ShouldSample(p SamplingParameters) SamplingResult { } func (alwaysOffSampler) Description() string { + // https://opentelemetry.io/docs/specs/otel/trace/sdk/#alwaysoff return "AlwaysOffSampler" } @@ -147,6 +157,22 @@ func NeverSample() Sampler { return alwaysOffSampler{} } +type predeterminedSampler struct { + description string + decision SamplingDecision +} + +func (s predeterminedSampler) ShouldSample(p SamplingParameters) SamplingResult { + return SamplingResult{ + Decision: s.decision, + Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(), + } +} + +func (s predeterminedSampler) Description() string { + return s.description +} + // ParentBased returns a sampler decorator which behaves differently, // based on the parent of the span. If the span has no parent, // the decorated sampler is used to make sampling decision. 
If the span has diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go index d46661059..7d55ce1dc 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go @@ -20,7 +20,7 @@ import ( "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/resource" - semconv "go.opentelemetry.io/otel/semconv/v1.39.0" + semconv "go.opentelemetry.io/otel/semconv/v1.40.0" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" ) diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go index b5497c281..766731dd2 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/version.go @@ -6,5 +6,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk" // Version is the current release version of the OpenTelemetry SDK in use. func Version() string { - return "1.40.0" + return "1.43.0" } diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/otelconv/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/otelconv/metric.go new file mode 100644 index 000000000..901da8698 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/otelconv/metric.go @@ -0,0 +1,2222 @@ +// Code generated from semantic convention specification. DO NOT EDIT. + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package otelconv provides types and functionality for OpenTelemetry semantic +// conventions in the "otel" namespace. 
+package otelconv + +import ( + "context" + "sync" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" +) + +var ( + addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }} + recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }} +) + +// ErrorTypeAttr is an attribute conforming to the error.type semantic +// conventions. It represents the describes a class of error the operation ended +// with. +type ErrorTypeAttr string + +var ( + // ErrorTypeOther is a fallback error value to be used when the instrumentation + // doesn't define a custom value. + ErrorTypeOther ErrorTypeAttr = "_OTHER" +) + +// ComponentTypeAttr is an attribute conforming to the otel.component.type +// semantic conventions. It represents a name identifying the type of the +// OpenTelemetry component. +type ComponentTypeAttr string + +var ( + // ComponentTypeBatchingSpanProcessor is the builtin SDK batching span + // processor. + ComponentTypeBatchingSpanProcessor ComponentTypeAttr = "batching_span_processor" + // ComponentTypeSimpleSpanProcessor is the builtin SDK simple span processor. + ComponentTypeSimpleSpanProcessor ComponentTypeAttr = "simple_span_processor" + // ComponentTypeBatchingLogProcessor is the builtin SDK batching log record + // processor. + ComponentTypeBatchingLogProcessor ComponentTypeAttr = "batching_log_processor" + // ComponentTypeSimpleLogProcessor is the builtin SDK simple log record + // processor. + ComponentTypeSimpleLogProcessor ComponentTypeAttr = "simple_log_processor" + // ComponentTypeOtlpGRPCSpanExporter is the OTLP span exporter over gRPC with + // protobuf serialization. + ComponentTypeOtlpGRPCSpanExporter ComponentTypeAttr = "otlp_grpc_span_exporter" + // ComponentTypeOtlpHTTPSpanExporter is the OTLP span exporter over HTTP with + // protobuf serialization. 
+ ComponentTypeOtlpHTTPSpanExporter ComponentTypeAttr = "otlp_http_span_exporter" + // ComponentTypeOtlpHTTPJSONSpanExporter is the OTLP span exporter over HTTP + // with JSON serialization. + ComponentTypeOtlpHTTPJSONSpanExporter ComponentTypeAttr = "otlp_http_json_span_exporter" + // ComponentTypeZipkinHTTPSpanExporter is the zipkin span exporter over HTTP. + ComponentTypeZipkinHTTPSpanExporter ComponentTypeAttr = "zipkin_http_span_exporter" + // ComponentTypeOtlpGRPCLogExporter is the OTLP log record exporter over gRPC + // with protobuf serialization. + ComponentTypeOtlpGRPCLogExporter ComponentTypeAttr = "otlp_grpc_log_exporter" + // ComponentTypeOtlpHTTPLogExporter is the OTLP log record exporter over HTTP + // with protobuf serialization. + ComponentTypeOtlpHTTPLogExporter ComponentTypeAttr = "otlp_http_log_exporter" + // ComponentTypeOtlpHTTPJSONLogExporter is the OTLP log record exporter over + // HTTP with JSON serialization. + ComponentTypeOtlpHTTPJSONLogExporter ComponentTypeAttr = "otlp_http_json_log_exporter" + // ComponentTypePeriodicMetricReader is the builtin SDK periodically exporting + // metric reader. + ComponentTypePeriodicMetricReader ComponentTypeAttr = "periodic_metric_reader" + // ComponentTypeOtlpGRPCMetricExporter is the OTLP metric exporter over gRPC + // with protobuf serialization. + ComponentTypeOtlpGRPCMetricExporter ComponentTypeAttr = "otlp_grpc_metric_exporter" + // ComponentTypeOtlpHTTPMetricExporter is the OTLP metric exporter over HTTP + // with protobuf serialization. + ComponentTypeOtlpHTTPMetricExporter ComponentTypeAttr = "otlp_http_metric_exporter" + // ComponentTypeOtlpHTTPJSONMetricExporter is the OTLP metric exporter over HTTP + // with JSON serialization. + ComponentTypeOtlpHTTPJSONMetricExporter ComponentTypeAttr = "otlp_http_json_metric_exporter" + // ComponentTypePrometheusHTTPTextMetricExporter is the prometheus metric + // exporter over HTTP with the default text-based format. 
+ ComponentTypePrometheusHTTPTextMetricExporter ComponentTypeAttr = "prometheus_http_text_metric_exporter" +) + +// SpanParentOriginAttr is an attribute conforming to the otel.span.parent.origin +// semantic conventions. It represents the determines whether the span has a +// parent span, and if so, [whether it is a remote parent]. +// +// [whether it is a remote parent]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote +type SpanParentOriginAttr string + +var ( + // SpanParentOriginNone is the span does not have a parent, it is a root span. + SpanParentOriginNone SpanParentOriginAttr = "none" + // SpanParentOriginLocal is the span has a parent and the parent's span context + // [isRemote()] is false. + // + // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote + SpanParentOriginLocal SpanParentOriginAttr = "local" + // SpanParentOriginRemote is the span has a parent and the parent's span context + // [isRemote()] is true. + // + // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote + SpanParentOriginRemote SpanParentOriginAttr = "remote" +) + +// SpanSamplingResultAttr is an attribute conforming to the +// otel.span.sampling_result semantic conventions. It represents the result value +// of the sampler for this span. +type SpanSamplingResultAttr string + +var ( + // SpanSamplingResultDrop is the span is not sampled and not recording. + SpanSamplingResultDrop SpanSamplingResultAttr = "DROP" + // SpanSamplingResultRecordOnly is the span is not sampled, but recording. + SpanSamplingResultRecordOnly SpanSamplingResultAttr = "RECORD_ONLY" + // SpanSamplingResultRecordAndSample is the span is sampled and recording. + SpanSamplingResultRecordAndSample SpanSamplingResultAttr = "RECORD_AND_SAMPLE" +) + +// SDKExporterLogExported is an instrument used to record metric values +// conforming to the "otel.sdk.exporter.log.exported" semantic conventions. 
It +// represents the number of log records for which the export has finished, either +// successful or failed. +type SDKExporterLogExported struct { + metric.Int64Counter +} + +var newSDKExporterLogExportedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of log records for which the export has finished, either successful or failed."), + metric.WithUnit("{log_record}"), +} + +// NewSDKExporterLogExported returns a new SDKExporterLogExported instrument. +func NewSDKExporterLogExported( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKExporterLogExported, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterLogExported{noop.Int64Counter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKExporterLogExportedOpts + } else { + opt = append(opt, newSDKExporterLogExportedOpts...) + } + + i, err := m.Int64Counter( + "otel.sdk.exporter.log.exported", + opt..., + ) + if err != nil { + return SDKExporterLogExported{noop.Int64Counter{}}, err + } + return SDKExporterLogExported{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterLogExported) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (SDKExporterLogExported) Name() string { + return "otel.sdk.exporter.log.exported" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterLogExported) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterLogExported) Description() string { + return "The number of log records for which the export has finished, either successful or failed." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. 
+// For exporters with partial success semantics (e.g. OTLP with +// `rejected_log_records`), rejected log records MUST count as failed and only +// non-rejected log records count as success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. +func (m SDKExporterLogExported) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with +// `rejected_log_records`), rejected log records MUST count as failed and only +// non-rejected log records count as success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. +func (m SDKExporterLogExported) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (SDKExporterLogExported) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. 
It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKExporterLogExported) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterLogExported) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterLogExported) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterLogExported) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterLogInflight is an instrument used to record metric values +// conforming to the "otel.sdk.exporter.log.inflight" semantic conventions. It +// represents the number of log records which were passed to the exporter, but +// that have not been exported yet (neither successful, nor failed). +type SDKExporterLogInflight struct { + metric.Int64UpDownCounter +} + +var newSDKExporterLogInflightOpts = []metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of log records which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."), + metric.WithUnit("{log_record}"), +} + +// NewSDKExporterLogInflight returns a new SDKExporterLogInflight instrument. 
+func NewSDKExporterLogInflight( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (SDKExporterLogInflight, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterLogInflight{noop.Int64UpDownCounter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKExporterLogInflightOpts + } else { + opt = append(opt, newSDKExporterLogInflightOpts...) + } + + i, err := m.Int64UpDownCounter( + "otel.sdk.exporter.log.inflight", + opt..., + ) + if err != nil { + return SDKExporterLogInflight{noop.Int64UpDownCounter{}}, err + } + return SDKExporterLogInflight{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterLogInflight) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKExporterLogInflight) Name() string { + return "otel.sdk.exporter.log.inflight" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterLogInflight) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterLogInflight) Description() string { + return "The number of log records which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +func (m SDKExporterLogInflight) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) 
+} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +func (m SDKExporterLogInflight) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKExporterLogInflight) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterLogInflight) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterLogInflight) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. 
+func (SDKExporterLogInflight) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterMetricDataPointExported is an instrument used to record metric +// values conforming to the "otel.sdk.exporter.metric_data_point.exported" +// semantic conventions. It represents the number of metric data points for which +// the export has finished, either successful or failed. +type SDKExporterMetricDataPointExported struct { + metric.Int64Counter +} + +var newSDKExporterMetricDataPointExportedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of metric data points for which the export has finished, either successful or failed."), + metric.WithUnit("{data_point}"), +} + +// NewSDKExporterMetricDataPointExported returns a new +// SDKExporterMetricDataPointExported instrument. +func NewSDKExporterMetricDataPointExported( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKExporterMetricDataPointExported, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterMetricDataPointExported{noop.Int64Counter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKExporterMetricDataPointExportedOpts + } else { + opt = append(opt, newSDKExporterMetricDataPointExportedOpts...) + } + + i, err := m.Int64Counter( + "otel.sdk.exporter.metric_data_point.exported", + opt..., + ) + if err != nil { + return SDKExporterMetricDataPointExported{noop.Int64Counter{}}, err + } + return SDKExporterMetricDataPointExported{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterMetricDataPointExported) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. 
+func (SDKExporterMetricDataPointExported) Name() string { + return "otel.sdk.exporter.metric_data_point.exported" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterMetricDataPointExported) Unit() string { + return "{data_point}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterMetricDataPointExported) Description() string { + return "The number of metric data points for which the export has finished, either successful or failed." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with +// `rejected_data_points`), rejected data points MUST count as failed and only +// non-rejected data points count as success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. +func (m SDKExporterMetricDataPointExported) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with +// `rejected_data_points`), rejected data points MUST count as failed and only +// non-rejected data points count as success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. 
+func (m SDKExporterMetricDataPointExported) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (SDKExporterMetricDataPointExported) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKExporterMetricDataPointExported) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterMetricDataPointExported) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterMetricDataPointExported) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. 
+func (SDKExporterMetricDataPointExported) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterMetricDataPointInflight is an instrument used to record metric +// values conforming to the "otel.sdk.exporter.metric_data_point.inflight" +// semantic conventions. It represents the number of metric data points which +// were passed to the exporter, but that have not been exported yet (neither +// successful, nor failed). +type SDKExporterMetricDataPointInflight struct { + metric.Int64UpDownCounter +} + +var newSDKExporterMetricDataPointInflightOpts = []metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of metric data points which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."), + metric.WithUnit("{data_point}"), +} + +// NewSDKExporterMetricDataPointInflight returns a new +// SDKExporterMetricDataPointInflight instrument. +func NewSDKExporterMetricDataPointInflight( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (SDKExporterMetricDataPointInflight, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterMetricDataPointInflight{noop.Int64UpDownCounter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKExporterMetricDataPointInflightOpts + } else { + opt = append(opt, newSDKExporterMetricDataPointInflightOpts...) + } + + i, err := m.Int64UpDownCounter( + "otel.sdk.exporter.metric_data_point.inflight", + opt..., + ) + if err != nil { + return SDKExporterMetricDataPointInflight{noop.Int64UpDownCounter{}}, err + } + return SDKExporterMetricDataPointInflight{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterMetricDataPointInflight) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (SDKExporterMetricDataPointInflight) Name() string { + return "otel.sdk.exporter.metric_data_point.inflight" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterMetricDataPointInflight) Unit() string { + return "{data_point}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterMetricDataPointInflight) Description() string { + return "The number of metric data points which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +func (m SDKExporterMetricDataPointInflight) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +func (m SDKExporterMetricDataPointInflight) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. 
It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKExporterMetricDataPointInflight) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterMetricDataPointInflight) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterMetricDataPointInflight) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterMetricDataPointInflight) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterOperationDuration is an instrument used to record metric values +// conforming to the "otel.sdk.exporter.operation.duration" semantic conventions. +// It represents the duration of exporting a batch of telemetry records. +type SDKExporterOperationDuration struct { + metric.Float64Histogram +} + +var newSDKExporterOperationDurationOpts = []metric.Float64HistogramOption{ + metric.WithDescription("The duration of exporting a batch of telemetry records."), + metric.WithUnit("s"), +} + +// NewSDKExporterOperationDuration returns a new SDKExporterOperationDuration +// instrument. 
+func NewSDKExporterOperationDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (SDKExporterOperationDuration, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterOperationDuration{noop.Float64Histogram{}}, nil + } + + if len(opt) == 0 { + opt = newSDKExporterOperationDurationOpts + } else { + opt = append(opt, newSDKExporterOperationDurationOpts...) + } + + i, err := m.Float64Histogram( + "otel.sdk.exporter.operation.duration", + opt..., + ) + if err != nil { + return SDKExporterOperationDuration{noop.Float64Histogram{}}, err + } + return SDKExporterOperationDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterOperationDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (SDKExporterOperationDuration) Name() string { + return "otel.sdk.exporter.operation.duration" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterOperationDuration) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterOperationDuration) Description() string { + return "The duration of exporting a batch of telemetry records." +} + +// Record records val to the current distribution for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// This metric defines successful operations using the full success definitions +// for [http] +// and [grpc]. Anything else is defined as an unsuccessful operation. For +// successful +// operations, `error.type` MUST NOT be set. For unsuccessful export operations, +// `error.type` MUST contain a relevant failure cause. 
+// +// [http]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success-1 +// [grpc]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success +func (m SDKExporterOperationDuration) Record( + ctx context.Context, + val float64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Float64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// This metric defines successful operations using the full success definitions +// for [http] +// and [grpc]. Anything else is defined as an unsuccessful operation. For +// successful +// operations, `error.type` MUST NOT be set. For unsuccessful export operations, +// `error.type` MUST contain a relevant failure cause. +// +// [http]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success-1 +// [grpc]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success +func (m SDKExporterOperationDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. 
+func (SDKExporterOperationDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrHTTPResponseStatusCode returns an optional attribute for the +// "http.response.status_code" semantic convention. It represents the HTTP status +// code of the last HTTP request performed in scope of this export call. +func (SDKExporterOperationDuration) AttrHTTPResponseStatusCode(val int) attribute.KeyValue { + return attribute.Int("http.response.status_code", val) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKExporterOperationDuration) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterOperationDuration) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrRPCResponseStatusCode returns an optional attribute for the +// "rpc.response.status_code" semantic convention. It represents the gRPC status +// code of the last gRPC request performed in scope of this export call. +func (SDKExporterOperationDuration) AttrRPCResponseStatusCode(val string) attribute.KeyValue { + return attribute.String("rpc.response.status_code", val) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. 
+func (SDKExporterOperationDuration) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterOperationDuration) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterSpanExported is an instrument used to record metric values +// conforming to the "otel.sdk.exporter.span.exported" semantic conventions. It +// represents the number of spans for which the export has finished, either +// successful or failed. +type SDKExporterSpanExported struct { + metric.Int64Counter +} + +var newSDKExporterSpanExportedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of spans for which the export has finished, either successful or failed."), + metric.WithUnit("{span}"), +} + +// NewSDKExporterSpanExported returns a new SDKExporterSpanExported instrument. +func NewSDKExporterSpanExported( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKExporterSpanExported, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterSpanExported{noop.Int64Counter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKExporterSpanExportedOpts + } else { + opt = append(opt, newSDKExporterSpanExportedOpts...) + } + + i, err := m.Int64Counter( + "otel.sdk.exporter.span.exported", + opt..., + ) + if err != nil { + return SDKExporterSpanExported{noop.Int64Counter{}}, err + } + return SDKExporterSpanExported{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterSpanExported) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. 
+func (SDKExporterSpanExported) Name() string { + return "otel.sdk.exporter.span.exported" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterSpanExported) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterSpanExported) Description() string { + return "The number of spans for which the export has finished, either successful or failed." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with `rejected_spans` +// ), rejected spans MUST count as failed and only non-rejected spans count as +// success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. +func (m SDKExporterSpanExported) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with `rejected_spans` +// ), rejected spans MUST count as failed and only non-rejected spans count as +// success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. 
+func (m SDKExporterSpanExported) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (SDKExporterSpanExported) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKExporterSpanExported) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterSpanExported) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterSpanExported) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. 
+func (SDKExporterSpanExported) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterSpanInflight is an instrument used to record metric values +// conforming to the "otel.sdk.exporter.span.inflight" semantic conventions. It +// represents the number of spans which were passed to the exporter, but that +// have not been exported yet (neither successful, nor failed). +type SDKExporterSpanInflight struct { + metric.Int64UpDownCounter +} + +var newSDKExporterSpanInflightOpts = []metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."), + metric.WithUnit("{span}"), +} + +// NewSDKExporterSpanInflight returns a new SDKExporterSpanInflight instrument. +func NewSDKExporterSpanInflight( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (SDKExporterSpanInflight, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterSpanInflight{noop.Int64UpDownCounter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKExporterSpanInflightOpts + } else { + opt = append(opt, newSDKExporterSpanInflightOpts...) + } + + i, err := m.Int64UpDownCounter( + "otel.sdk.exporter.span.inflight", + opt..., + ) + if err != nil { + return SDKExporterSpanInflight{noop.Int64UpDownCounter{}}, err + } + return SDKExporterSpanInflight{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterSpanInflight) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (SDKExporterSpanInflight) Name() string { + return "otel.sdk.exporter.span.inflight" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterSpanInflight) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterSpanInflight) Description() string { + return "The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +func (m SDKExporterSpanInflight) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +func (m SDKExporterSpanInflight) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. 
+func (SDKExporterSpanInflight) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterSpanInflight) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterSpanInflight) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterSpanInflight) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKLogCreated is an instrument used to record metric values conforming to the +// "otel.sdk.log.created" semantic conventions. It represents the number of logs +// submitted to enabled SDK Loggers. +type SDKLogCreated struct { + metric.Int64Counter +} + +var newSDKLogCreatedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of logs submitted to enabled SDK Loggers."), + metric.WithUnit("{log_record}"), +} + +// NewSDKLogCreated returns a new SDKLogCreated instrument. +func NewSDKLogCreated( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKLogCreated, error) { + // Check if the meter is nil. + if m == nil { + return SDKLogCreated{noop.Int64Counter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKLogCreatedOpts + } else { + opt = append(opt, newSDKLogCreatedOpts...) 
+ } + + i, err := m.Int64Counter( + "otel.sdk.log.created", + opt..., + ) + if err != nil { + return SDKLogCreated{noop.Int64Counter{}}, err + } + return SDKLogCreated{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKLogCreated) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (SDKLogCreated) Name() string { + return "otel.sdk.log.created" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKLogCreated) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKLogCreated) Description() string { + return "The number of logs submitted to enabled SDK Loggers." +} + +// Add adds incr to the existing count for attrs. +func (m SDKLogCreated) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m SDKLogCreated) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// SDKMetricReaderCollectionDuration is an instrument used to record metric +// values conforming to the "otel.sdk.metric_reader.collection.duration" semantic +// conventions. It represents the duration of the collect operation of the metric +// reader. 
+type SDKMetricReaderCollectionDuration struct { + metric.Float64Histogram +} + +var newSDKMetricReaderCollectionDurationOpts = []metric.Float64HistogramOption{ + metric.WithDescription("The duration of the collect operation of the metric reader."), + metric.WithUnit("s"), +} + +// NewSDKMetricReaderCollectionDuration returns a new +// SDKMetricReaderCollectionDuration instrument. +func NewSDKMetricReaderCollectionDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (SDKMetricReaderCollectionDuration, error) { + // Check if the meter is nil. + if m == nil { + return SDKMetricReaderCollectionDuration{noop.Float64Histogram{}}, nil + } + + if len(opt) == 0 { + opt = newSDKMetricReaderCollectionDurationOpts + } else { + opt = append(opt, newSDKMetricReaderCollectionDurationOpts...) + } + + i, err := m.Float64Histogram( + "otel.sdk.metric_reader.collection.duration", + opt..., + ) + if err != nil { + return SDKMetricReaderCollectionDuration{noop.Float64Histogram{}}, err + } + return SDKMetricReaderCollectionDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKMetricReaderCollectionDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (SDKMetricReaderCollectionDuration) Name() string { + return "otel.sdk.metric_reader.collection.duration" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKMetricReaderCollectionDuration) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (SDKMetricReaderCollectionDuration) Description() string { + return "The duration of the collect operation of the metric reader." +} + +// Record records val to the current distribution for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful collections, `error.type` MUST NOT be set. 
For failed +// collections, `error.type` SHOULD contain the failure cause. +// It can happen that metrics collection is successful for some MetricProducers, +// while others fail. In that case `error.type` SHOULD be set to any of the +// failure causes. +func (m SDKMetricReaderCollectionDuration) Record( + ctx context.Context, + val float64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Float64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// For successful collections, `error.type` MUST NOT be set. For failed +// collections, `error.type` SHOULD contain the failure cause. +// It can happen that metrics collection is successful for some MetricProducers, +// while others fail. In that case `error.type` SHOULD be set to any of the +// failure causes. +func (m SDKMetricReaderCollectionDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (SDKMetricReaderCollectionDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. 
It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKMetricReaderCollectionDuration) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKMetricReaderCollectionDuration) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorLogProcessed is an instrument used to record metric values +// conforming to the "otel.sdk.processor.log.processed" semantic conventions. It +// represents the number of log records for which the processing has finished, +// either successful or failed. +type SDKProcessorLogProcessed struct { + metric.Int64Counter +} + +var newSDKProcessorLogProcessedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of log records for which the processing has finished, either successful or failed."), + metric.WithUnit("{log_record}"), +} + +// NewSDKProcessorLogProcessed returns a new SDKProcessorLogProcessed instrument. +func NewSDKProcessorLogProcessed( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKProcessorLogProcessed, error) { + // Check if the meter is nil. + if m == nil { + return SDKProcessorLogProcessed{noop.Int64Counter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKProcessorLogProcessedOpts + } else { + opt = append(opt, newSDKProcessorLogProcessedOpts...) + } + + i, err := m.Int64Counter( + "otel.sdk.processor.log.processed", + opt..., + ) + if err != nil { + return SDKProcessorLogProcessed{noop.Int64Counter{}}, err + } + return SDKProcessorLogProcessed{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m SDKProcessorLogProcessed) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (SDKProcessorLogProcessed) Name() string { + return "otel.sdk.processor.log.processed" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKProcessorLogProcessed) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKProcessorLogProcessed) Description() string { + return "The number of log records for which the processing has finished, either successful or failed." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful processing, `error.type` MUST NOT be set. For failed +// processing, `error.type` MUST contain the failure cause. +// For the SDK Simple and Batching Log Record Processor a log record is +// considered to be processed already when it has been submitted to the exporter, +// not when the corresponding export call has finished. +func (m SDKProcessorLogProcessed) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful processing, `error.type` MUST NOT be set. For failed +// processing, `error.type` MUST contain the failure cause. +// For the SDK Simple and Batching Log Record Processor a log record is +// considered to be processed already when it has been submitted to the exporter, +// not when the corresponding export call has finished. 
+func (m SDKProcessorLogProcessed) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents a low-cardinality description of the failure reason. +// SDK Batching Log Record Processors MUST use `queue_full` for log records +// dropped due to a full queue. +func (SDKProcessorLogProcessed) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKProcessorLogProcessed) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKProcessorLogProcessed) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorLogQueueCapacity is an instrument used to record metric values +// conforming to the "otel.sdk.processor.log.queue.capacity" semantic +// conventions. It represents the maximum number of log records the queue of a +// given instance of an SDK Log Record processor can hold. 
+type SDKProcessorLogQueueCapacity struct { + metric.Int64ObservableUpDownCounter +} + +var newSDKProcessorLogQueueCapacityOpts = []metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("The maximum number of log records the queue of a given instance of an SDK Log Record processor can hold."), + metric.WithUnit("{log_record}"), +} + +// NewSDKProcessorLogQueueCapacity returns a new SDKProcessorLogQueueCapacity +// instrument. +func NewSDKProcessorLogQueueCapacity( + m metric.Meter, + opt ...metric.Int64ObservableUpDownCounterOption, +) (SDKProcessorLogQueueCapacity, error) { + // Check if the meter is nil. + if m == nil { + return SDKProcessorLogQueueCapacity{noop.Int64ObservableUpDownCounter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKProcessorLogQueueCapacityOpts + } else { + opt = append(opt, newSDKProcessorLogQueueCapacityOpts...) + } + + i, err := m.Int64ObservableUpDownCounter( + "otel.sdk.processor.log.queue.capacity", + opt..., + ) + if err != nil { + return SDKProcessorLogQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err + } + return SDKProcessorLogQueueCapacity{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKProcessorLogQueueCapacity) Inst() metric.Int64ObservableUpDownCounter { + return m.Int64ObservableUpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKProcessorLogQueueCapacity) Name() string { + return "otel.sdk.processor.log.queue.capacity" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKProcessorLogQueueCapacity) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKProcessorLogQueueCapacity) Description() string { + return "The maximum number of log records the queue of a given instance of an SDK Log Record processor can hold." +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. 
It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKProcessorLogQueueCapacity) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKProcessorLogQueueCapacity) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorLogQueueSize is an instrument used to record metric values +// conforming to the "otel.sdk.processor.log.queue.size" semantic conventions. It +// represents the number of log records in the queue of a given instance of an +// SDK log processor. +type SDKProcessorLogQueueSize struct { + metric.Int64ObservableUpDownCounter +} + +var newSDKProcessorLogQueueSizeOpts = []metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("The number of log records in the queue of a given instance of an SDK log processor."), + metric.WithUnit("{log_record}"), +} + +// NewSDKProcessorLogQueueSize returns a new SDKProcessorLogQueueSize instrument. +func NewSDKProcessorLogQueueSize( + m metric.Meter, + opt ...metric.Int64ObservableUpDownCounterOption, +) (SDKProcessorLogQueueSize, error) { + // Check if the meter is nil. + if m == nil { + return SDKProcessorLogQueueSize{noop.Int64ObservableUpDownCounter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKProcessorLogQueueSizeOpts + } else { + opt = append(opt, newSDKProcessorLogQueueSizeOpts...) + } + + i, err := m.Int64ObservableUpDownCounter( + "otel.sdk.processor.log.queue.size", + opt..., + ) + if err != nil { + return SDKProcessorLogQueueSize{noop.Int64ObservableUpDownCounter{}}, err + } + return SDKProcessorLogQueueSize{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m SDKProcessorLogQueueSize) Inst() metric.Int64ObservableUpDownCounter { + return m.Int64ObservableUpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKProcessorLogQueueSize) Name() string { + return "otel.sdk.processor.log.queue.size" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKProcessorLogQueueSize) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKProcessorLogQueueSize) Description() string { + return "The number of log records in the queue of a given instance of an SDK log processor." +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKProcessorLogQueueSize) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKProcessorLogQueueSize) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorSpanProcessed is an instrument used to record metric values +// conforming to the "otel.sdk.processor.span.processed" semantic conventions. It +// represents the number of spans for which the processing has finished, either +// successful or failed. 
+type SDKProcessorSpanProcessed struct { + metric.Int64Counter +} + +var newSDKProcessorSpanProcessedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of spans for which the processing has finished, either successful or failed."), + metric.WithUnit("{span}"), +} + +// NewSDKProcessorSpanProcessed returns a new SDKProcessorSpanProcessed +// instrument. +func NewSDKProcessorSpanProcessed( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKProcessorSpanProcessed, error) { + // Check if the meter is nil. + if m == nil { + return SDKProcessorSpanProcessed{noop.Int64Counter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKProcessorSpanProcessedOpts + } else { + opt = append(opt, newSDKProcessorSpanProcessedOpts...) + } + + i, err := m.Int64Counter( + "otel.sdk.processor.span.processed", + opt..., + ) + if err != nil { + return SDKProcessorSpanProcessed{noop.Int64Counter{}}, err + } + return SDKProcessorSpanProcessed{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKProcessorSpanProcessed) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (SDKProcessorSpanProcessed) Name() string { + return "otel.sdk.processor.span.processed" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKProcessorSpanProcessed) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKProcessorSpanProcessed) Description() string { + return "The number of spans for which the processing has finished, either successful or failed." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful processing, `error.type` MUST NOT be set. For failed +// processing, `error.type` MUST contain the failure cause. 
+// For the SDK Simple and Batching Span Processor a span is considered to be +// processed already when it has been submitted to the exporter, not when the +// corresponding export call has finished. +func (m SDKProcessorSpanProcessed) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful processing, `error.type` MUST NOT be set. For failed +// processing, `error.type` MUST contain the failure cause. +// For the SDK Simple and Batching Span Processor a span is considered to be +// processed already when it has been submitted to the exporter, not when the +// corresponding export call has finished. +func (m SDKProcessorSpanProcessed) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents a low-cardinality description of the failure reason. +// SDK Batching Span Processors MUST use `queue_full` for spans dropped due to a +// full queue. +func (SDKProcessorSpanProcessed) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. 
+func (SDKProcessorSpanProcessed) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKProcessorSpanProcessed) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorSpanQueueCapacity is an instrument used to record metric values +// conforming to the "otel.sdk.processor.span.queue.capacity" semantic +// conventions. It represents the maximum number of spans the queue of a given +// instance of an SDK span processor can hold. +type SDKProcessorSpanQueueCapacity struct { + metric.Int64ObservableUpDownCounter +} + +var newSDKProcessorSpanQueueCapacityOpts = []metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("The maximum number of spans the queue of a given instance of an SDK span processor can hold."), + metric.WithUnit("{span}"), +} + +// NewSDKProcessorSpanQueueCapacity returns a new SDKProcessorSpanQueueCapacity +// instrument. +func NewSDKProcessorSpanQueueCapacity( + m metric.Meter, + opt ...metric.Int64ObservableUpDownCounterOption, +) (SDKProcessorSpanQueueCapacity, error) { + // Check if the meter is nil. + if m == nil { + return SDKProcessorSpanQueueCapacity{noop.Int64ObservableUpDownCounter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKProcessorSpanQueueCapacityOpts + } else { + opt = append(opt, newSDKProcessorSpanQueueCapacityOpts...) + } + + i, err := m.Int64ObservableUpDownCounter( + "otel.sdk.processor.span.queue.capacity", + opt..., + ) + if err != nil { + return SDKProcessorSpanQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err + } + return SDKProcessorSpanQueueCapacity{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m SDKProcessorSpanQueueCapacity) Inst() metric.Int64ObservableUpDownCounter { + return m.Int64ObservableUpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKProcessorSpanQueueCapacity) Name() string { + return "otel.sdk.processor.span.queue.capacity" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKProcessorSpanQueueCapacity) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKProcessorSpanQueueCapacity) Description() string { + return "The maximum number of spans the queue of a given instance of an SDK span processor can hold." +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKProcessorSpanQueueCapacity) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKProcessorSpanQueueCapacity) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorSpanQueueSize is an instrument used to record metric values +// conforming to the "otel.sdk.processor.span.queue.size" semantic conventions. +// It represents the number of spans in the queue of a given instance of an SDK +// span processor. 
+type SDKProcessorSpanQueueSize struct { + metric.Int64ObservableUpDownCounter +} + +var newSDKProcessorSpanQueueSizeOpts = []metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("The number of spans in the queue of a given instance of an SDK span processor."), + metric.WithUnit("{span}"), +} + +// NewSDKProcessorSpanQueueSize returns a new SDKProcessorSpanQueueSize +// instrument. +func NewSDKProcessorSpanQueueSize( + m metric.Meter, + opt ...metric.Int64ObservableUpDownCounterOption, +) (SDKProcessorSpanQueueSize, error) { + // Check if the meter is nil. + if m == nil { + return SDKProcessorSpanQueueSize{noop.Int64ObservableUpDownCounter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKProcessorSpanQueueSizeOpts + } else { + opt = append(opt, newSDKProcessorSpanQueueSizeOpts...) + } + + i, err := m.Int64ObservableUpDownCounter( + "otel.sdk.processor.span.queue.size", + opt..., + ) + if err != nil { + return SDKProcessorSpanQueueSize{noop.Int64ObservableUpDownCounter{}}, err + } + return SDKProcessorSpanQueueSize{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKProcessorSpanQueueSize) Inst() metric.Int64ObservableUpDownCounter { + return m.Int64ObservableUpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKProcessorSpanQueueSize) Name() string { + return "otel.sdk.processor.span.queue.size" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKProcessorSpanQueueSize) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKProcessorSpanQueueSize) Description() string { + return "The number of spans in the queue of a given instance of an SDK span processor." +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. 
It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKProcessorSpanQueueSize) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKProcessorSpanQueueSize) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKSpanLive is an instrument used to record metric values conforming to the +// "otel.sdk.span.live" semantic conventions. It represents the number of created +// spans with `recording=true` for which the end operation has not been called +// yet. +type SDKSpanLive struct { + metric.Int64UpDownCounter +} + +var newSDKSpanLiveOpts = []metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of created spans with `recording=true` for which the end operation has not been called yet."), + metric.WithUnit("{span}"), +} + +// NewSDKSpanLive returns a new SDKSpanLive instrument. +func NewSDKSpanLive( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (SDKSpanLive, error) { + // Check if the meter is nil. + if m == nil { + return SDKSpanLive{noop.Int64UpDownCounter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKSpanLiveOpts + } else { + opt = append(opt, newSDKSpanLiveOpts...) + } + + i, err := m.Int64UpDownCounter( + "otel.sdk.span.live", + opt..., + ) + if err != nil { + return SDKSpanLive{noop.Int64UpDownCounter{}}, err + } + return SDKSpanLive{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKSpanLive) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (SDKSpanLive) Name() string { + return "otel.sdk.span.live" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKSpanLive) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKSpanLive) Description() string { + return "The number of created spans with `recording=true` for which the end operation has not been called yet." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m SDKSpanLive) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m SDKSpanLive) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrSpanSamplingResult returns an optional attribute for the +// "otel.span.sampling_result" semantic convention. It represents the result +// value of the sampler for this span. +func (SDKSpanLive) AttrSpanSamplingResult(val SpanSamplingResultAttr) attribute.KeyValue { + return attribute.String("otel.span.sampling_result", string(val)) +} + +// SDKSpanStarted is an instrument used to record metric values conforming to the +// "otel.sdk.span.started" semantic conventions. It represents the number of +// created spans. 
+type SDKSpanStarted struct { + metric.Int64Counter +} + +var newSDKSpanStartedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of created spans."), + metric.WithUnit("{span}"), +} + +// NewSDKSpanStarted returns a new SDKSpanStarted instrument. +func NewSDKSpanStarted( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKSpanStarted, error) { + // Check if the meter is nil. + if m == nil { + return SDKSpanStarted{noop.Int64Counter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKSpanStartedOpts + } else { + opt = append(opt, newSDKSpanStartedOpts...) + } + + i, err := m.Int64Counter( + "otel.sdk.span.started", + opt..., + ) + if err != nil { + return SDKSpanStarted{noop.Int64Counter{}}, err + } + return SDKSpanStarted{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKSpanStarted) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (SDKSpanStarted) Name() string { + return "otel.sdk.span.started" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKSpanStarted) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKSpanStarted) Description() string { + return "The number of created spans." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// Implementations MUST record this metric for all spans, even for non-recording +// ones. +func (m SDKSpanStarted) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) 
+} + +// AddSet adds incr to the existing count for set. +// +// Implementations MUST record this metric for all spans, even for non-recording +// ones. +func (m SDKSpanStarted) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrSpanParentOrigin returns an optional attribute for the +// "otel.span.parent.origin" semantic convention. It represents the determines +// whether the span has a parent span, and if so, [whether it is a remote parent] +// . +// +// [whether it is a remote parent]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote +func (SDKSpanStarted) AttrSpanParentOrigin(val SpanParentOriginAttr) attribute.KeyValue { + return attribute.String("otel.span.parent.origin", string(val)) +} + +// AttrSpanSamplingResult returns an optional attribute for the +// "otel.span.sampling_result" semantic convention. It represents the result +// value of the sampler for this span. +func (SDKSpanStarted) AttrSpanSamplingResult(val SpanSamplingResultAttr) attribute.KeyValue { + return attribute.String("otel.span.sampling_result", string(val)) +} diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go index 96c06ec32..e3d103c4b 100644 --- a/vendor/go.opentelemetry.io/otel/trace/trace.go +++ b/vendor/go.opentelemetry.io/otel/trace/trace.go @@ -196,6 +196,20 @@ func (tf TraceFlags) WithSampled(sampled bool) TraceFlags { // nolint:revive // return tf &^ FlagsSampled } +// IsRandom reports whether the random bit is set in the TraceFlags. +func (tf TraceFlags) IsRandom() bool { + return tf&FlagsRandom == FlagsRandom +} + +// WithRandom sets the random bit in a new copy of the TraceFlags. 
+func (tf TraceFlags) WithRandom(random bool) TraceFlags { // nolint:revive // random is not a control flag. + if random { + return tf | FlagsRandom + } + + return tf &^ FlagsRandom +} + // MarshalJSON implements a custom marshal function to encode TraceFlags // as a hex string. func (tf TraceFlags) MarshalJSON() ([]byte, error) { @@ -322,6 +336,11 @@ func (sc SpanContext) IsSampled() bool { return sc.traceFlags.IsSampled() } +// IsRandom reports whether the random bit is set in the SpanContext's TraceFlags. +func (sc SpanContext) IsRandom() bool { + return sc.traceFlags.IsRandom() +} + // WithTraceFlags returns a new SpanContext with the TraceFlags replaced. func (sc SpanContext) WithTraceFlags(flags TraceFlags) SpanContext { return SpanContext{ diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index 2dae06f25..1db4f47e4 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. 
func Version() string { - return "1.42.0" + return "1.43.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index 6c634ef31..bcc6ee78a 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -3,7 +3,7 @@ module-sets: stable-v1: - version: v1.42.0 + version: v1.43.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus @@ -22,11 +22,11 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.64.0 + version: v0.65.0 modules: - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.18.0 + version: v0.19.0 modules: - go.opentelemetry.io/otel/log - go.opentelemetry.io/otel/log/logtest @@ -64,3 +64,6 @@ modules: go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp: version-refs: - ./internal/version.go + go.opentelemetry.io/otel/exporters/stdout/stdoutlog: + version-refs: + - ./internal/version.go diff --git a/vendor/helm.sh/helm/v3/pkg/chart/metadata.go b/vendor/helm.sh/helm/v3/pkg/chart/metadata.go index a08a97cd1..0e78fda4e 100644 --- a/vendor/helm.sh/helm/v3/pkg/chart/metadata.go +++ b/vendor/helm.sh/helm/v3/pkg/chart/metadata.go @@ -112,6 +112,9 @@ func (md *Metadata) Validate() error { return ValidationError("chart.metadata.name is required") } + if md.Name == "." || md.Name == ".." { + return ValidationErrorf("chart.metadata.name %q is not allowed", md.Name) + } if md.Name != filepath.Base(md.Name) { return ValidationErrorf("chart.metadata.name %q is invalid", md.Name) } diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/expand.go b/vendor/helm.sh/helm/v3/pkg/chartutil/expand.go index ac59f2575..6508cb154 100644 --- a/vendor/helm.sh/helm/v3/pkg/chartutil/expand.go +++ b/vendor/helm.sh/helm/v3/pkg/chartutil/expand.go @@ -17,6 +17,7 @@ limitations under the License. 
package chartutil import ( + "fmt" "io" "os" "path/filepath" @@ -51,6 +52,17 @@ func Expand(dir string, r io.Reader) error { return errors.New("chart name not specified") } + // Reject chart names that are POSIX path dot-segments or dot-dot segments or contain path separators. + // A dot-segment name (e.g. ".") causes SecureJoin to resolve to the root + // directory and extraction then to write files directly into that extraction root + // instead of a per-chart subdirectory. + if chartName == "." || chartName == ".." { + return fmt.Errorf("chart name %q is not allowed", chartName) + } + if chartName != filepath.Base(chartName) { + return fmt.Errorf("chart name %q must not contain path separators", chartName) + } + // Find the base directory // The directory needs to be cleaned prior to passing to SecureJoin or the location may end up // being wrong or returning an error. This was introduced in v0.4.0. @@ -60,6 +72,12 @@ func Expand(dir string, r io.Reader) error { return err } + // Defense-in-depth: the chart directory must be a subdirectory of dir, + // never dir itself. + if chartdir == dir { + return fmt.Errorf("chart name %q resolves to the extraction root", chartName) + } + // Copy all files verbatim. We don't parse these files because parsing can remove // comments. 
for _, file := range files { diff --git a/vendor/modules.txt b/vendor/modules.txt index f1d4249a6..67b71d5ba 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -89,7 +89,7 @@ github.com/blang/semver/v4 # github.com/cenkalti/backoff/v5 v5.0.3 ## explicit; go 1.23 github.com/cenkalti/backoff/v5 -# github.com/cert-manager/cert-manager v1.20.0 +# github.com/cert-manager/cert-manager v1.20.1 ## explicit; go 1.25.0 github.com/cert-manager/cert-manager/pkg/apis/acme github.com/cert-manager/cert-manager/pkg/apis/acme/v1 @@ -235,7 +235,7 @@ github.com/davecgh/go-spew/spew # github.com/distribution/reference v0.6.0 ## explicit; go 1.20 github.com/distribution/reference -# github.com/docker/cli v29.3.0+incompatible +# github.com/docker/cli v29.3.1+incompatible ## explicit github.com/docker/cli/cli/config github.com/docker/cli/cli/config/configfile @@ -284,7 +284,7 @@ github.com/felixge/httpsnoop ## explicit; go 1.17 github.com/fsnotify/fsnotify github.com/fsnotify/fsnotify/internal -# github.com/fxamacker/cbor/v2 v2.9.0 +# github.com/fxamacker/cbor/v2 v2.9.1 ## explicit; go 1.20 github.com/fxamacker/cbor/v2 # github.com/go-errors/errors v1.5.1 @@ -585,7 +585,7 @@ github.com/mattn/go-isatty # github.com/mattn/go-runewidth v0.0.19 ## explicit; go 1.20 github.com/mattn/go-runewidth -# github.com/mattn/go-sqlite3 v1.14.34 +# github.com/mattn/go-sqlite3 v1.14.37 ## explicit; go 1.19 github.com/mattn/go-sqlite3 # github.com/miekg/pkcs11 v1.1.2 @@ -653,7 +653,7 @@ github.com/opencontainers/image-spec/specs-go/v1 # github.com/opencontainers/runtime-spec v1.3.0 ## explicit github.com/opencontainers/runtime-spec/specs-go -# github.com/operator-framework/api v0.41.0 +# github.com/operator-framework/api v0.42.0 ## explicit; go 1.25.7 github.com/operator-framework/api/pkg/constraints github.com/operator-framework/api/pkg/encoding @@ -679,7 +679,7 @@ github.com/operator-framework/helm-operator-plugins/pkg/storage github.com/operator-framework/operator-lib/handler 
github.com/operator-framework/operator-lib/handler/internal/metrics github.com/operator-framework/operator-lib/internal/annotation -# github.com/operator-framework/operator-registry v1.64.0 +# github.com/operator-framework/operator-registry v1.65.0 ## explicit; go 1.25.7 github.com/operator-framework/operator-registry/alpha/declcfg github.com/operator-framework/operator-registry/alpha/model @@ -839,7 +839,7 @@ go.opentelemetry.io/auto/sdk/internal/telemetry go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv -# go.opentelemetry.io/otel v1.42.0 +# go.opentelemetry.io/otel v1.43.0 ## explicit; go 1.25.0 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute @@ -858,6 +858,7 @@ go.opentelemetry.io/otel/semconv/v1.39.0 go.opentelemetry.io/otel/semconv/v1.39.0/httpconv go.opentelemetry.io/otel/semconv/v1.39.0/otelconv go.opentelemetry.io/otel/semconv/v1.40.0 +go.opentelemetry.io/otel/semconv/v1.40.0/otelconv # go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 ## explicit; go 1.24.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace @@ -872,13 +873,13 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x -# go.opentelemetry.io/otel/metric v1.42.0 +# go.opentelemetry.io/otel/metric v1.43.0 ## explicit; go 1.25.0 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/embedded go.opentelemetry.io/otel/metric/noop -# go.opentelemetry.io/otel/sdk v1.40.0 -## explicit; go 1.24.0 +# go.opentelemetry.io/otel/sdk v1.43.0 +## explicit; go 1.25.0 go.opentelemetry.io/otel/sdk go.opentelemetry.io/otel/sdk/instrumentation 
go.opentelemetry.io/otel/sdk/internal/x @@ -886,7 +887,7 @@ go.opentelemetry.io/otel/sdk/resource go.opentelemetry.io/otel/sdk/trace go.opentelemetry.io/otel/sdk/trace/internal/env go.opentelemetry.io/otel/sdk/trace/internal/observ -# go.opentelemetry.io/otel/trace v1.42.0 +# go.opentelemetry.io/otel/trace v1.43.0 ## explicit; go 1.25.0 go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded @@ -1122,7 +1123,7 @@ google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/api/expr/v1alpha1 google.golang.org/genproto/googleapis/api/httpbody -# google.golang.org/genproto/googleapis/rpc v0.0.0-20260217215200-42d3e9bedb6d +# google.golang.org/genproto/googleapis/rpc v0.0.0-20260319201613-d00831a3d3e7 ## explicit; go 1.25.0 google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status @@ -1251,7 +1252,7 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# helm.sh/helm/v3 v3.20.1 +# helm.sh/helm/v3 v3.20.2 ## explicit; go 1.25.0 helm.sh/helm/v3/internal/fileutil helm.sh/helm/v3/internal/resolver @@ -1446,7 +1447,7 @@ k8s.io/apimachinery/pkg/version k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.35.2 => k8s.io/apiserver v0.35.0 +# k8s.io/apiserver v0.35.3 => k8s.io/apiserver v0.35.0 ## explicit; go 1.25.0 k8s.io/apiserver/pkg/apis/apiserver k8s.io/apiserver/pkg/apis/apiserver/install @@ -1862,7 +1863,7 @@ k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/watchlist k8s.io/client-go/util/workqueue -# k8s.io/component-base v0.35.2 => k8s.io/component-base v0.35.0 +# k8s.io/component-base v0.35.3 => k8s.io/component-base v0.35.0 ## explicit; go 1.25.0 k8s.io/component-base/cli/flag k8s.io/component-base/compatibility @@ -1974,7 +1975,7 @@ 
oras.land/oras-go/v2/registry/remote/credentials/trace oras.land/oras-go/v2/registry/remote/errcode oras.land/oras-go/v2/registry/remote/internal/errutil oras.land/oras-go/v2/registry/remote/retry -# pkg.package-operator.run/boxcutter v0.13.0 +# pkg.package-operator.run/boxcutter v0.13.1 ## explicit; go 1.25.3 pkg.package-operator.run/boxcutter pkg.package-operator.run/boxcutter/machinery diff --git a/vendor/pkg.package-operator.run/boxcutter/boxcutter.go b/vendor/pkg.package-operator.run/boxcutter/boxcutter.go index 38f5b68d5..d4519b4af 100644 --- a/vendor/pkg.package-operator.run/boxcutter/boxcutter.go +++ b/vendor/pkg.package-operator.run/boxcutter/boxcutter.go @@ -137,7 +137,14 @@ type RevisionEngineOptions struct { // Optional - PhaseValidator *validation.PhaseValidator + // UnfilteredReader is a client.Reader which is not subject to filtering + // which may be applied to Reader if it is cached using object selectors. + // UnfilteredReader is used rarely in edge cases that do not persist, so it + // is safe that it should not be cached. It is additionally recommended that + // it should not be cached, as caching it would negate the benefits of cache + // filtering. + UnfilteredReader client.Reader + PhaseValidator *validation.PhaseValidator } // NewPhaseEngine returns a new PhaseEngine instance. 
@@ -156,6 +163,7 @@ func NewPhaseEngine(opts RevisionEngineOptions) (*machinery.PhaseEngine, error) oe := machinery.NewObjectEngine( opts.Scheme, opts.Reader, opts.Writer, comp, opts.FieldOwner, opts.SystemPrefix, + opts.UnfilteredReader, ) return machinery.NewPhaseEngine(oe, opts.PhaseValidator), nil @@ -176,6 +184,7 @@ func NewRevisionEngine(opts RevisionEngineOptions) (*RevisionEngine, error) { oe := machinery.NewObjectEngine( opts.Scheme, opts.Reader, opts.Writer, comp, opts.FieldOwner, opts.SystemPrefix, + opts.UnfilteredReader, ) pe := machinery.NewPhaseEngine(oe, pval) diff --git a/vendor/pkg.package-operator.run/boxcutter/machinery/objects.go b/vendor/pkg.package-operator.run/boxcutter/machinery/objects.go index ca9af7669..d5442965b 100644 --- a/vendor/pkg.package-operator.run/boxcutter/machinery/objects.go +++ b/vendor/pkg.package-operator.run/boxcutter/machinery/objects.go @@ -29,6 +29,14 @@ type ObjectEngine struct { fieldOwner string systemPrefix string + + // unfilteredReader is a client.Reader which is not subject to filtering + // which may have been applied to cache. unfilteredReader MUST ONLY be used + // in cases where we need to read an object that may have been subject to + // filtering by the cache. Consequently it will usually also be uncached, + // but it explicitly MUST NOT be used to read objects which will eventually + // be available in the cache. + unfilteredReader client.Reader // may be nil } // NewObjectEngine returns a new Engine instance. 
@@ -40,12 +48,15 @@ func NewObjectEngine( fieldOwner string, systemPrefix string, + + unfilteredReader client.Reader, // may be nil ) *ObjectEngine { return &ObjectEngine{ - scheme: scheme, - cache: cache, - writer: writer, - comparator: comparator, + scheme: scheme, + cache: cache, + writer: writer, + unfilteredReader: unfilteredReader, + comparator: comparator, fieldOwner: fieldOwner, systemPrefix: systemPrefix, @@ -239,9 +250,26 @@ func (e *ObjectEngine) Reconcile( err := e.create( ctx, desiredObject, options, client.FieldOwner(e.fieldOwner)) if errors.IsAlreadyExists(err) { - // Might be a slow cache or an object created by a different actor - // but excluded by the cache selector. - return nil, NewCreateCollisionError(desiredObject, err.Error()) + if e.unfilteredReader == nil { + return nil, NewCreateCollisionError(desiredObject, err.Error()) + } + + // Object exists but was not found in the cache. This may be due to + // a slow cache or a race, but it can also happen deterministically + // if label selectors excluded it from the cache. Read it directly + // and delegate to the update path, which evaluates + // CollisionProtection settings. 
+ actualObject := desiredObject.DeepCopyObject().(Object) + if err := e.unfilteredReader.Get( + ctx, client.ObjectKeyFromObject(desiredObject), actualObject, + ); err != nil { + return nil, fmt.Errorf("getting object after create collision: %w", err) + } + + return e.objectUpdateHandling( + ctx, revision, desiredObject, + actualObject, options, + ) } if err != nil { diff --git a/vendor/pkg.package-operator.run/boxcutter/managedcache/objectboundaccess.go b/vendor/pkg.package-operator.run/boxcutter/managedcache/objectboundaccess.go index 17ba9fee5..4b16ccdcc 100644 --- a/vendor/pkg.package-operator.run/boxcutter/managedcache/objectboundaccess.go +++ b/vendor/pkg.package-operator.run/boxcutter/managedcache/objectboundaccess.go @@ -62,6 +62,13 @@ type ObjectsPerOwnerPerGVK map[AccessManagerKey]map[schema.GroupVersionKind]int type Accessor interface { client.Writer TrackingCache + + // UnfilteredReader returns a client.Reader with the same permissions as the + // TrackingCache client, but which is not subject to filtering which has + // been applied to the cache. UnfilteredReader is also usually uncached, so + // it should be used carefully and only in cases where the object will never + // be available in the cache. + UnfilteredReader() client.Reader } // NewObjectBoundAccessManager returns a new ObjectBoundAccessManager for T. @@ -152,6 +159,11 @@ type cacheDone struct { type accessor struct { TrackingCache client.Writer + unfilteredReader client.Reader +} + +func (a *accessor) UnfilteredReader() client.Reader { + return a.unfilteredReader } func (m *objectBoundAccessManagerImpl[T]) Source( @@ -314,8 +326,9 @@ func (m *objectBoundAccessManagerImpl[T]) handleAccessorRequest( // start cache ctx, cancel := context.WithCancel(ctx) a := &accessor{ - TrackingCache: ctrlcache, - Writer: client, + TrackingCache: ctrlcache, + Writer: client, + unfilteredReader: client, } entry = accessorEntry{