diff --git a/cmd/ci-operator-checkconfig/main.go b/cmd/ci-operator-checkconfig/main.go index 92e4415525..ba10a2978d 100644 --- a/cmd/ci-operator-checkconfig/main.go +++ b/cmd/ci-operator-checkconfig/main.go @@ -35,6 +35,7 @@ type options struct { clusterProfiles api.ClusterProfilesMap clusterClaimOwners api.ClusterClaimOwnersMap clusterProfileSetDetails api.ClusterProfileSetDetails + allowedAudiences api.AllowedAudiencesMap } func (o *options) parse() error { @@ -42,6 +43,7 @@ func (o *options) parse() error { var profilesConfigPath string var clusterClaimConfigPath string var clusterProfileSetDetailsPath string + var allowedAudiencesConfigPath string fs := flag.NewFlagSet("", flag.ExitOnError) @@ -49,6 +51,7 @@ func (o *options) parse() error { fs.StringVar(&profilesConfigPath, "cluster-profiles-config", "", "Path to the cluster profile config file") fs.StringVar(&clusterClaimConfigPath, "cluster-claim-owners-config", "", "Path to the cluster claim owners config file") fs.StringVar(&clusterProfileSetDetailsPath, "cluster-profile-set-details", "", "Path to the cluster profile set details file") + fs.StringVar(&allowedAudiencesConfigPath, "allowed-audiences-config", "", "Path to the allowed audiences config file") o.Options.Bind(fs) if err := fs.Parse(os.Args[1:]); err != nil { @@ -71,6 +74,14 @@ func (o *options) parse() error { } o.clusterClaimOwners = claimOwners + if allowedAudiencesConfigPath != "" { + allowedAudiences, err := load.AllowedAudiencesConfig(allowedAudiencesConfigPath) + if err != nil { + return fmt.Errorf("failed to load allowed audiences config: %w", err) + } + o.allowedAudiences = allowedAudiences + } + ciOPConfigAgent, err := agents.NewConfigAgent(o.ConfigDir, nil, agents.WithOrg(o.Org), agents.WithRepo(o.Repo)) if err != nil { return fmt.Errorf("failed to create CI Op config agent: %w", err) @@ -111,7 +122,8 @@ func (o *options) validate() (ret []error) { errCh := make(chan error) map_ := func() error { validator := 
validation.NewValidator(o.clusterProfiles, o.clusterClaimOwners, - validation.WithClusterProfileSetDetails(o.clusterProfileSetDetails)) + validation.WithClusterProfileSetDetails(o.clusterProfileSetDetails), + validation.WithAllowedAudiences(o.allowedAudiences)) for c := range inputCh { if err := o.validateConfiguration(&validator, outputCh, c); err != nil { errCh <- fmt.Errorf("failed to validate configuration %s: %w", c.Metadata.RelativePath(), err) diff --git a/pkg/api/types.go b/pkg/api/types.go index 399d4852a9..e5f1202603 100644 --- a/pkg/api/types.go +++ b/pkg/api/types.go @@ -1154,6 +1154,27 @@ type LiteralTestStep struct { // NodeArchitecture is the architecture for the node where the test will run. // If set, the generated test pod will include a nodeSelector for this architecture. NodeArchitecture *NodeArchitecture `json:"node_architecture,omitempty"` + // ServiceAccountTokens configures additional projected service account token + // volumes with custom audiences, mounted into the step container. This is + // useful for workloads that need to exchange tokens with external identity + // providers (e.g., GCP Workload Identity Federation). + ServiceAccountTokens []ServiceAccountTokenVolume `json:"service_account_tokens,omitempty"` +} + +// ServiceAccountTokenVolume configures a projected service account token volume +// with a custom audience mounted into the step container. The kubelet handles +// the token request transparently — no additional RBAC is required beyond pod +// creation. +type ServiceAccountTokenVolume struct { + // Audience is the intended audience of the token. The token will only be + // valid for recipients that identify themselves with this audience. + Audience string `json:"audience"` + // MountPath is the path where the token will be mounted in the container. + MountPath string `json:"mount_path"` + // ExpirationSeconds is the requested duration of validity of the token, + // in seconds. 
The kubelet will automatically rotate the token at 80% of + // its TTL. Defaults to 3600 (1 hour) if not set. + ExpirationSeconds *int64 `json:"expiration_seconds,omitempty"` } // StepParameter is a variable set by the test, with an optional default. @@ -3194,6 +3215,21 @@ type ClusterClaimOwnerDetails struct { Repos []string `yaml:"repos,omitempty"` } +// AllowedAudiencesMap maps audience strings to their ownership details. +// Audiences in this map are restricted to configs from the listed org/repo owners. +// Audiences not in this map are unrestricted. +type AllowedAudiencesMap map[string]AllowedAudienceDetails + +type AllowedAudienceDetails struct { + Audience string `yaml:"audience" json:"audience"` + Owners []AllowedAudienceOwners `yaml:"owners,omitempty" json:"owners,omitempty"` +} + +type AllowedAudienceOwners struct { + Org string `yaml:"org" json:"org"` + Repos []string `yaml:"repos,omitempty" json:"repos,omitempty"` +} + const ( EphemeralClusterTestDoneSignalSecretName = "test-done-signal" ) diff --git a/pkg/api/zz_generated.deepcopy.go b/pkg/api/zz_generated.deepcopy.go index 6afd53e113..30812913c9 100644 --- a/pkg/api/zz_generated.deepcopy.go +++ b/pkg/api/zz_generated.deepcopy.go @@ -10,6 +10,69 @@ import ( "sigs.k8s.io/prow/pkg/config" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllowedAudienceDetails) DeepCopyInto(out *AllowedAudienceDetails) { + *out = *in + if in.Owners != nil { + in, out := &in.Owners, &out.Owners + *out = make([]AllowedAudienceOwners, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedAudienceDetails. 
+func (in *AllowedAudienceDetails) DeepCopy() *AllowedAudienceDetails { + if in == nil { + return nil + } + out := new(AllowedAudienceDetails) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllowedAudienceOwners) DeepCopyInto(out *AllowedAudienceOwners) { + *out = *in + if in.Repos != nil { + in, out := &in.Repos, &out.Repos + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedAudienceOwners. +func (in *AllowedAudienceOwners) DeepCopy() *AllowedAudienceOwners { + if in == nil { + return nil + } + out := new(AllowedAudienceOwners) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in AllowedAudiencesMap) DeepCopyInto(out *AllowedAudiencesMap) { + { + in := &in + *out = make(AllowedAudiencesMap, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedAudiencesMap. +func (in AllowedAudiencesMap) DeepCopy() AllowedAudiencesMap { + if in == nil { + return nil + } + out := new(AllowedAudiencesMap) + in.DeepCopyInto(out) + return *out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *BuildArg) DeepCopyInto(out *BuildArg) { *out = *in @@ -976,6 +1039,13 @@ func (in *LiteralTestStep) DeepCopyInto(out *LiteralTestStep) { *out = new(NodeArchitecture) **out = **in } + if in.ServiceAccountTokens != nil { + in, out := &in.ServiceAccountTokens, &out.ServiceAccountTokens + *out = make([]ServiceAccountTokenVolume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LiteralTestStep. @@ -2017,6 +2087,26 @@ func (in *Secret) DeepCopy() *Secret { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountTokenVolume) DeepCopyInto(out *ServiceAccountTokenVolume) { + *out = *in + if in.ExpirationSeconds != nil { + in, out := &in.ExpirationSeconds, &out.ExpirationSeconds + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountTokenVolume. +func (in *ServiceAccountTokenVolume) DeepCopy() *ServiceAccountTokenVolume { + if in == nil { + return nil + } + out := new(ServiceAccountTokenVolume) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SlackReporterConfig) DeepCopyInto(out *SlackReporterConfig) { *out = *in diff --git a/pkg/load/load.go b/pkg/load/load.go index a2df2adcf6..c2a4198f63 100644 --- a/pkg/load/load.go +++ b/pkg/load/load.go @@ -338,3 +338,31 @@ func ClusterClaimOwnersConfig(configPath string) (api.ClusterClaimOwnersMap, err } return clusterClaimOwnersMap, nil } + +// AllowedAudiencesConfig loads allowed audiences information from its config in the release repository. +// If the file does not exist, an empty map is returned. 
+func AllowedAudiencesConfig(configPath string) (api.AllowedAudiencesMap, error) { + configContents, err := os.ReadFile(configPath) + if err != nil { + if os.IsNotExist(err) { + return make(api.AllowedAudiencesMap), nil + } + return nil, fmt.Errorf("failed to read allowed audiences config: %w", err) + } + + var audiencesList []api.AllowedAudienceDetails + if err = yaml.UnmarshalStrict(configContents, &audiencesList); err != nil { + return nil, fmt.Errorf("failed to unmarshal allowed audiences config: %w", err) + } + allowedAudiencesMap := make(api.AllowedAudiencesMap, len(audiencesList)) + for i, a := range audiencesList { + if a.Audience == "" { + return nil, fmt.Errorf("allowed audiences config entry %d: audience must not be empty", i) + } + if _, exists := allowedAudiencesMap[a.Audience]; exists { + return nil, fmt.Errorf("allowed audiences config: duplicate audience %q", a.Audience) + } + allowedAudiencesMap[a.Audience] = a + } + return allowedAudiencesMap, nil +} diff --git a/pkg/steps/multi_stage/gen.go b/pkg/steps/multi_stage/gen.go index 70af8f8187..3d415ba01a 100644 --- a/pkg/steps/multi_stage/gen.go +++ b/pkg/steps/multi_stage/gen.go @@ -149,10 +149,12 @@ func (s *multiStageTestStep) generatePods( pod.Annotations[base_steps.AnnotationSaveContainerLogs] = "true" pod.Labels[MultiStageTestLabel] = s.name needsKubeConfig := isKubeconfigNeeded(&step, genPodOpts) - if needsKubeConfig { + if needsKubeConfig || len(step.ServiceAccountTokens) > 0 { pod.Spec.ServiceAccountName = s.name } else { pod.Spec.ServiceAccountName = "" + } + if !needsKubeConfig { no := false pod.Spec.AutomountServiceAccountToken = &no } @@ -249,6 +251,7 @@ func (s *multiStageTestStep) generatePods( if step.RunAsScript != nil && *step.RunAsScript { addCommandScript(commandConfigMapForTest(s.name), pod) } + addServiceAccountTokenVolumes(step.ServiceAccountTokens, pod) if s.vpnConf != nil { caps := coreapi.Capabilities{ Add: []coreapi.Capability{"NET_ADMIN"}, @@ -636,6 +639,35 @@ func 
addCommandScript(name string, pod *coreapi.Pod) { }) } +func addServiceAccountTokenVolumes(tokens []api.ServiceAccountTokenVolume, pod *coreapi.Pod) { + for i, token := range tokens { + volumeName := fmt.Sprintf("sa-token-%d", i) + expSeconds := int64(3600) + if token.ExpirationSeconds != nil { + expSeconds = *token.ExpirationSeconds + } + pod.Spec.Volumes = append(pod.Spec.Volumes, coreapi.Volume{ + Name: volumeName, + VolumeSource: coreapi.VolumeSource{ + Projected: &coreapi.ProjectedVolumeSource{ + Sources: []coreapi.VolumeProjection{{ + ServiceAccountToken: &coreapi.ServiceAccountTokenProjection{ + Audience: token.Audience, + ExpirationSeconds: &expSeconds, + Path: "token", + }, + }}, + }, + }, + }) + pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts, coreapi.VolumeMount{ + Name: volumeName, + MountPath: token.MountPath, + ReadOnly: true, + }) + } +} + func addLeaseProxyScripts(pod *coreapi.Pod, c *coreapi.Container) { pod.Spec.Volumes = append(pod.Spec.Volumes, coreapi.Volume{ Name: "lease-proxy", diff --git a/pkg/steps/multi_stage/gen_test.go b/pkg/steps/multi_stage/gen_test.go index b86f8b8b0f..691ff988fd 100644 --- a/pkg/steps/multi_stage/gen_test.go +++ b/pkg/steps/multi_stage/gen_test.go @@ -142,6 +142,49 @@ func TestGeneratePods(t *testing.T) { }}, }, }, + { + name: "service account token projection", + config: &api.ReleaseBuildConfiguration{ + Tests: []api.TestStepConfiguration{{ + As: "test", + MultiStageTestConfigurationLiteral: &api.MultiStageTestConfigurationLiteral{ + Test: []api.LiteralTestStep{{ + As: "step0", + From: "src", + Commands: "command0", + ServiceAccountTokens: []api.ServiceAccountTokenVolume{{ + Audience: "gcp-wif-audience", + MountPath: "/var/run/secrets/wif", + }, { + Audience: "vault", + MountPath: "/var/run/secrets/vault", + ExpirationSeconds: ptr.To(int64(7200)), + }}, + }}, + }, + }}, + }, + }, + { + name: "service account token projection with no_kubeconfig", + config: 
&api.ReleaseBuildConfiguration{ + Tests: []api.TestStepConfiguration{{ + As: "test", + MultiStageTestConfigurationLiteral: &api.MultiStageTestConfigurationLiteral{ + Test: []api.LiteralTestStep{{ + As: "step0", + From: "src", + Commands: "command0", + NoKubeconfig: ptr.To(true), + ServiceAccountTokens: []api.ServiceAccountTokenVolume{{ + Audience: "gcp-wif-audience", + MountPath: "/var/run/secrets/wif", + }}, + }}, + }, + }}, + }, + }, { name: "lease proxy server available", config: &api.ReleaseBuildConfiguration{ diff --git a/pkg/steps/multi_stage/testdata/zz_fixture_TestGeneratePods_service_account_token_projection.yaml b/pkg/steps/multi_stage/testdata/zz_fixture_TestGeneratePods_service_account_token_projection.yaml new file mode 100644 index 0000000000..115bd5ecec --- /dev/null +++ b/pkg/steps/multi_stage/testdata/zz_fixture_TestGeneratePods_service_account_token_projection.yaml @@ -0,0 +1,184 @@ +- metadata: + annotations: + ci-operator.openshift.io/container-sub-tests: test + ci-operator.openshift.io/save-container-logs: "true" + ci.openshift.io/job-spec: "" + creationTimestamp: null + labels: + OPENSHIFT_CI: "true" + ci.openshift.io/jobid: prow_job_id + ci.openshift.io/jobname: job + ci.openshift.io/jobtype: postsubmit + ci.openshift.io/metadata.branch: base_ref + ci.openshift.io/metadata.org: org + ci.openshift.io/metadata.repo: repo + ci.openshift.io/metadata.step: step0 + ci.openshift.io/metadata.target: target + ci.openshift.io/metadata.variant: variant + ci.openshift.io/multi-stage-test: test + created-by-ci: "true" + name: test-step0 + namespace: namespace + spec: + containers: + - args: + - /tools/entrypoint + command: + - /tmp/entrypoint-wrapper/entrypoint-wrapper + env: + - name: BUILD_ID + value: build id + - name: CI + value: "true" + - name: JOB_NAME + value: job + - name: JOB_SPEC + value: '{"type":"postsubmit","job":"job","buildid":"build id","prowjobid":"prow + job id","refs":{"org":"org","repo":"repo","base_ref":"base ref","base_sha":"base + 
sha"},"decoration_config":{"timeout":"2h0m0s","grace_period":"15s","utility_images":{"entrypoint":"entrypoint","sidecar":"sidecar"}}}' + - name: JOB_TYPE + value: postsubmit + - name: OPENSHIFT_CI + value: "true" + - name: PROW_JOB_ID + value: prow job id + - name: PULL_BASE_REF + value: base ref + - name: PULL_BASE_SHA + value: base sha + - name: PULL_REFS + value: base ref:base sha + - name: REPO_NAME + value: repo + - name: REPO_OWNER + value: org + - name: SRC_BASE + value: org/repo + - name: SRC_HOST + value: github.com + - name: GIT_CONFIG_COUNT + value: "1" + - name: GIT_CONFIG_KEY_0 + value: safe.directory + - name: GIT_CONFIG_VALUE_0 + value: '*' + - name: ENTRYPOINT_OPTIONS + value: '{"timeout":7200000000000,"grace_period":15000000000,"artifact_dir":"/logs/artifacts","args":["/bin/bash","-c","#!/bin/bash\nset + -eu\ncommand0"],"container_name":"test","process_log":"/logs/process-log.txt","marker_file":"/logs/marker-file.txt","metadata_file":"/logs/artifacts/metadata.json"}' + - name: ARTIFACT_DIR + value: /logs/artifacts + - name: NAMESPACE + value: namespace + - name: JOB_NAME_SAFE + value: test + - name: JOB_NAME_HASH + value: 5e8c9 + - name: UNIQUE_HASH + value: 5e8c9 + - name: KUBECONFIG + value: /var/run/secrets/ci.openshift.io/multi-stage/kubeconfig + - name: KUBECONFIGMINIMAL + value: /var/run/secrets/ci.openshift.io/multi-stage/kubeconfig-minimal + - name: KUBEADMIN_PASSWORD_FILE + value: /var/run/secrets/ci.openshift.io/multi-stage/kubeadmin-password + - name: SHARED_DIR + value: /var/run/secrets/ci.openshift.io/multi-stage + - name: LEASE_PROXY_CLIENT_SH + value: /opt/scripts/lease-proxy/client.sh + image: pipeline:src + name: test + resources: {} + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /logs + name: logs + - mountPath: /tools + name: tools + - mountPath: /alabama + name: home + - mountPath: /tmp/entrypoint-wrapper + name: entrypoint-wrapper + - mountPath: /dev/shm + name: dshm + - mountPath: 
/var/run/secrets/ci.openshift.io/multi-stage + name: test + - mountPath: /var/run/secrets/wif + name: sa-token-0 + readOnly: true + - mountPath: /var/run/secrets/vault + name: sa-token-1 + readOnly: true + - mountPath: /opt/scripts/lease-proxy + name: lease-proxy + readOnly: true + - env: + - name: JOB_SPEC + - name: SIDECAR_OPTIONS + value: '{"gcs_options":{"items":["/logs/artifacts"],"sub_dir":"artifacts/test/step0","dry_run":false},"entries":[{"args":["/bin/bash","-c","#!/bin/bash\nset + -eu\ncommand0"],"container_name":"test","process_log":"/logs/process-log.txt","marker_file":"/logs/marker-file.txt","metadata_file":"/logs/artifacts/metadata.json"}],"ignore_interrupts":true,"censoring_options":{}}' + image: sidecar + name: sidecar + resources: {} + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /logs + name: logs + initContainers: + - args: + - --copy-mode-only + image: entrypoint + name: place-entrypoint + resources: {} + volumeMounts: + - mountPath: /tools + name: tools + - args: + - /bin/entrypoint-wrapper + - /tmp/entrypoint-wrapper/entrypoint-wrapper + command: + - cp + image: quay-proxy.ci.openshift.org/openshift/ci:ci_entrypoint-wrapper_latest + name: cp-entrypoint-wrapper + resources: {} + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /tmp/entrypoint-wrapper + name: entrypoint-wrapper + nodeName: node-name + restartPolicy: Never + serviceAccountName: test + terminationGracePeriodSeconds: 18 + volumes: + - emptyDir: {} + name: logs + - emptyDir: {} + name: tools + - emptyDir: {} + name: home + - emptyDir: {} + name: entrypoint-wrapper + - emptyDir: + medium: Memory + sizeLimit: 2G + name: dshm + - name: test + secret: + secretName: test + - name: sa-token-0 + projected: + sources: + - serviceAccountToken: + audience: gcp-wif-audience + expirationSeconds: 3600 + path: token + - name: sa-token-1 + projected: + sources: + - serviceAccountToken: + audience: vault + expirationSeconds: 7200 + 
path: token + - configMap: + name: lease-proxy + name: lease-proxy + status: {} diff --git a/pkg/steps/multi_stage/testdata/zz_fixture_TestGeneratePods_service_account_token_projection_with_no_kubeconfig.yaml b/pkg/steps/multi_stage/testdata/zz_fixture_TestGeneratePods_service_account_token_projection_with_no_kubeconfig.yaml new file mode 100644 index 0000000000..2c08706939 --- /dev/null +++ b/pkg/steps/multi_stage/testdata/zz_fixture_TestGeneratePods_service_account_token_projection_with_no_kubeconfig.yaml @@ -0,0 +1,170 @@ +- metadata: + annotations: + ci-operator.openshift.io/container-sub-tests: test + ci-operator.openshift.io/save-container-logs: "true" + ci.openshift.io/job-spec: "" + creationTimestamp: null + labels: + OPENSHIFT_CI: "true" + ci.openshift.io/jobid: prow_job_id + ci.openshift.io/jobname: job + ci.openshift.io/jobtype: postsubmit + ci.openshift.io/metadata.branch: base_ref + ci.openshift.io/metadata.org: org + ci.openshift.io/metadata.repo: repo + ci.openshift.io/metadata.step: step0 + ci.openshift.io/metadata.target: target + ci.openshift.io/metadata.variant: variant + ci.openshift.io/multi-stage-test: test + created-by-ci: "true" + name: test-step0 + namespace: namespace + spec: + automountServiceAccountToken: false + containers: + - args: + - --mode=skip-kubeconfig + - /tools/entrypoint + command: + - /tmp/entrypoint-wrapper/entrypoint-wrapper + env: + - name: BUILD_ID + value: build id + - name: CI + value: "true" + - name: JOB_NAME + value: job + - name: JOB_SPEC + value: '{"type":"postsubmit","job":"job","buildid":"build id","prowjobid":"prow + job id","refs":{"org":"org","repo":"repo","base_ref":"base ref","base_sha":"base + sha"},"decoration_config":{"timeout":"2h0m0s","grace_period":"15s","utility_images":{"entrypoint":"entrypoint","sidecar":"sidecar"}}}' + - name: JOB_TYPE + value: postsubmit + - name: OPENSHIFT_CI + value: "true" + - name: PROW_JOB_ID + value: prow job id + - name: PULL_BASE_REF + value: base ref + - name: 
PULL_BASE_SHA + value: base sha + - name: PULL_REFS + value: base ref:base sha + - name: REPO_NAME + value: repo + - name: REPO_OWNER + value: org + - name: SRC_BASE + value: org/repo + - name: SRC_HOST + value: github.com + - name: GIT_CONFIG_COUNT + value: "1" + - name: GIT_CONFIG_KEY_0 + value: safe.directory + - name: GIT_CONFIG_VALUE_0 + value: '*' + - name: ENTRYPOINT_OPTIONS + value: '{"timeout":7200000000000,"grace_period":15000000000,"artifact_dir":"/logs/artifacts","args":["/bin/bash","-c","#!/bin/bash\nset + -eu\ncommand0"],"container_name":"test","process_log":"/logs/process-log.txt","marker_file":"/logs/marker-file.txt","metadata_file":"/logs/artifacts/metadata.json"}' + - name: ARTIFACT_DIR + value: /logs/artifacts + - name: NAMESPACE + value: namespace + - name: JOB_NAME_SAFE + value: test + - name: JOB_NAME_HASH + value: 5e8c9 + - name: UNIQUE_HASH + value: 5e8c9 + - name: SHARED_DIR + value: /var/run/secrets/ci.openshift.io/multi-stage + - name: LEASE_PROXY_CLIENT_SH + value: /opt/scripts/lease-proxy/client.sh + image: pipeline:src + name: test + resources: {} + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /logs + name: logs + - mountPath: /tools + name: tools + - mountPath: /alabama + name: home + - mountPath: /tmp/entrypoint-wrapper + name: entrypoint-wrapper + - mountPath: /dev/shm + name: dshm + - mountPath: /var/run/secrets/ci.openshift.io/multi-stage + name: test + - mountPath: /var/run/secrets/wif + name: sa-token-0 + readOnly: true + - mountPath: /opt/scripts/lease-proxy + name: lease-proxy + readOnly: true + - env: + - name: JOB_SPEC + - name: SIDECAR_OPTIONS + value: '{"gcs_options":{"items":["/logs/artifacts"],"sub_dir":"artifacts/test/step0","dry_run":false},"entries":[{"args":["/bin/bash","-c","#!/bin/bash\nset + 
-eu\ncommand0"],"container_name":"test","process_log":"/logs/process-log.txt","marker_file":"/logs/marker-file.txt","metadata_file":"/logs/artifacts/metadata.json"}],"ignore_interrupts":true,"censoring_options":{}}' + image: sidecar + name: sidecar + resources: {} + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /logs + name: logs + initContainers: + - args: + - --copy-mode-only + image: entrypoint + name: place-entrypoint + resources: {} + volumeMounts: + - mountPath: /tools + name: tools + - args: + - /bin/entrypoint-wrapper + - /tmp/entrypoint-wrapper/entrypoint-wrapper + command: + - cp + image: quay-proxy.ci.openshift.org/openshift/ci:ci_entrypoint-wrapper_latest + name: cp-entrypoint-wrapper + resources: {} + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /tmp/entrypoint-wrapper + name: entrypoint-wrapper + nodeName: node-name + restartPolicy: Never + serviceAccountName: test + terminationGracePeriodSeconds: 18 + volumes: + - emptyDir: {} + name: logs + - emptyDir: {} + name: tools + - emptyDir: {} + name: home + - emptyDir: {} + name: entrypoint-wrapper + - emptyDir: + medium: Memory + sizeLimit: 2G + name: dshm + - name: test + secret: + secretName: test + - name: sa-token-0 + projected: + sources: + - serviceAccountToken: + audience: gcp-wif-audience + expirationSeconds: 3600 + path: token + - configMap: + name: lease-proxy + name: lease-proxy + status: {} diff --git a/pkg/validation/config.go b/pkg/validation/config.go index 1fada5ff15..8fadb6cb4f 100644 --- a/pkg/validation/config.go +++ b/pkg/validation/config.go @@ -17,6 +17,7 @@ import ( type Validator struct { validClusterProfiles api.ClusterProfilesMap validClusterClaimOwners api.ClusterClaimOwnersMap + allowedAudiences api.AllowedAudiencesMap // hasTrapCache avoids redundant regexp searches on step commands. 
hasTrapCache map[string]bool cpsDetails api.ClusterProfileSetDetails @@ -28,6 +29,10 @@ func WithClusterProfileSetDetails(cpsDetails api.ClusterProfileSetDetails) func( return func(v *Validator) { v.cpsDetails = cpsDetails } } +func WithAllowedAudiences(audiences api.AllowedAudiencesMap) func(*Validator) { + return func(v *Validator) { v.allowedAudiences = audiences } +} + // NewValidator creates an object that optimizes bulk validations. func NewValidator(profiles api.ClusterProfilesMap, clusterClaimOwners api.ClusterClaimOwnersMap, opts ...ValidatorOption) Validator { v := Validator{ @@ -693,6 +698,6 @@ func Observer(observer api.Observer) []error { // this observer in the future. This technically disallows users from using `from:` // to refer to an image from a release payload for an observer, but this should be // not of any real issue and will at least be obvious to the user on presubmit. - errs = append(errs, validateFromAndFromImage(newContext("", nil, nil, nil), observer.From, observer.FromImage, nil, nil)...) + errs = append(errs, validateFromAndFromImage(newContext("", nil, nil, nil, nil), observer.From, observer.FromImage, nil, nil)...) return errs } diff --git a/pkg/validation/test.go b/pkg/validation/test.go index 99fb29ab6d..bddeda212a 100644 --- a/pkg/validation/test.go +++ b/pkg/validation/test.go @@ -73,6 +73,8 @@ type context struct { inputImagesSeen testInputImages // releases is used to validate references to release images . releases sets.Set[string] + // metadata is used for ownership checks (e.g., audience restrictions). + metadata *api.Metadata } // newContext creates a top-level context. 
@@ -81,6 +83,7 @@ func newContext( env api.TestEnvironment, releases sets.Set[string], inputImagesSeen testInputImages, + metadata *api.Metadata, ) *context { return &context{ field: field, @@ -89,6 +92,7 @@ func newContext( leasesSeen: sets.New[string](), inputImagesSeen: inputImagesSeen, releases: releases, + metadata: metadata, } } @@ -586,7 +590,7 @@ func (v *Validator) validateTestConfigurationType( clusterCount++ validationErrors = append(validationErrors, v.validateClusterProfile(fieldRoot, testConfig.ClusterProfile, test.As, metadata)...) } - context := newContext(fieldPath(fieldRoot), testConfig.Environment, releases, inputImagesSeen) + context := newContext(fieldPath(fieldRoot), testConfig.Environment, releases, inputImagesSeen, metadata) validationErrors = append(validationErrors, validateLeases(context.addField("leases"), testConfig.Leases)...) if testConfig.NodeArchitecture != nil { validationErrors = append(validationErrors, validateNodeArchitecture(fieldRoot, *testConfig.NodeArchitecture)) @@ -597,7 +601,7 @@ func (v *Validator) validateTestConfigurationType( } if testConfig := test.MultiStageTestConfigurationLiteral; testConfig != nil { typeCount++ - context := newContext(fieldPath(fieldRoot).addField("steps"), testConfig.Environment, releases, inputImagesSeen) + context := newContext(fieldPath(fieldRoot).addField("steps"), testConfig.Environment, releases, inputImagesSeen, metadata) if testConfig.ClusterProfile != "" { clusterCount++ validationErrors = append(validationErrors, v.validateClusterProfile(fieldRoot, testConfig.ClusterProfile, test.As, metadata)...) @@ -706,6 +710,7 @@ func (v *Validator) validateLiteralTestStep(context *context, stage testStage, s } ret = append(ret, validateDependencies(string(context.field), step.Dependencies)...) ret = append(ret, validateLeases(context.addField("leases"), step.Leases)...) 
+ ret = append(ret, v.validateServiceAccountTokens(string(context.field), step.ServiceAccountTokens, step.Credentials, context.metadata)...) if step.NodeArchitecture != nil { if err := validateNodeArchitecture(string(context.field), *step.NodeArchitecture); err != nil { ret = append(ret, err) @@ -953,6 +958,83 @@ func validateLeases(context *context, leases []api.StepLease) (ret []error) { return } +func (v *Validator) validateServiceAccountTokens(fieldRoot string, tokens []api.ServiceAccountTokenVolume, credentials []api.CredentialReference, metadata *api.Metadata) (ret []error) { + mountPaths := sets.New[string]() + for i, token := range tokens { + fieldPath := fmt.Sprintf("%s.service_account_tokens[%d]", fieldRoot, i) + if token.Audience == "" { + ret = append(ret, fmt.Errorf("%s.audience: must not be empty", fieldPath)) + } else if v.allowedAudiences != nil { + if details, ok := v.allowedAudiences[token.Audience]; ok { + if err := verifyAudienceOwnership(details, metadata); err != nil { + ret = append(ret, err) + } + } + } + if token.MountPath == "" { + ret = append(ret, fmt.Errorf("%s.mount_path: must not be empty", fieldPath)) + } else if !filepath.IsAbs(token.MountPath) { + ret = append(ret, fmt.Errorf("%s.mount_path: must be an absolute path", fieldPath)) + } else if mountPaths.Has(token.MountPath) { + ret = append(ret, fmt.Errorf("%s.mount_path: duplicate mount path %q", fieldPath, token.MountPath)) + } else { + for j, cred := range credentials { + if cred.MountPath == "" { + continue + } + if token.MountPath == cred.MountPath { + ret = append(ret, fmt.Errorf("%s.mount_path: collides with credentials[%d] mount path %q", fieldPath, j, cred.MountPath)) + } else if relPath, err := filepath.Rel(cred.MountPath, token.MountPath); err == nil && !strings.Contains(relPath, "..") { + ret = append(ret, fmt.Errorf("%s.mount_path: %s is under credentials[%d] mount path %s", fieldPath, token.MountPath, j, cred.MountPath)) + } else if relPath, err := 
filepath.Rel(token.MountPath, cred.MountPath); err == nil && !strings.Contains(relPath, "..") { + ret = append(ret, fmt.Errorf("%s.mount_path: credentials[%d] mount path %s is under %s", fieldPath, j, cred.MountPath, token.MountPath)) + } + } + for j, other := range tokens[:i] { + if other.MountPath == "" { + continue + } + if relPath, err := filepath.Rel(other.MountPath, token.MountPath); err == nil && !strings.Contains(relPath, "..") { + ret = append(ret, fmt.Errorf("%s.mount_path: %s is under service_account_tokens[%d] mount path %s", fieldPath, token.MountPath, j, other.MountPath)) + } + if relPath, err := filepath.Rel(token.MountPath, other.MountPath); err == nil && !strings.Contains(relPath, "..") { + ret = append(ret, fmt.Errorf("%s.mount_path: service_account_tokens[%d] mount path %s is under %s", fieldPath, j, other.MountPath, token.MountPath)) + } + } + mountPaths.Insert(token.MountPath) + } + if token.ExpirationSeconds != nil && *token.ExpirationSeconds < 600 { + ret = append(ret, fmt.Errorf("%s.expiration_seconds: must be at least 600 (10 minutes)", fieldPath)) + } + } + return +} + +// verifyAudienceOwnership checks if metadata's org and repo match those in the +// audience config, verifying if it's one of the owners of the audience. +func verifyAudienceOwnership(audience api.AllowedAudienceDetails, m *api.Metadata) error { + // When metadata is nil (e.g., standalone registry reference validation via + // IsValidReference), we can't determine org/repo ownership, so we permit the + // audience. This is intentionally more permissive than verifyClusterProfileOwnership + // because audience restrictions are enforced at the resolved config level where + // metadata is always available. 
+ if m == nil || m.Org == "" { + return nil + } + if len(audience.Owners) == 0 { + return nil + } + for _, owner := range audience.Owners { + if owner.Org != m.Org { + continue + } + if owner.Repos == nil || util.Contains(owner.Repos, m.Repo) { + return nil + } + } + return fmt.Errorf("%s/%s is not allowed to use service account token audience %q", m.Org, m.Repo, audience.Audience) +} + func validateNodeArchitectureOverrides(fieldRoot string, nodeArchitectureOverrides api.NodeArchitectureOverrides) error { for index, arch := range nodeArchitectureOverrides { if err := arch.Validate(); err != nil { diff --git a/pkg/validation/test_test.go b/pkg/validation/test_test.go index ddf96f8887..39297e0218 100644 --- a/pkg/validation/test_test.go +++ b/pkg/validation/test_test.go @@ -1083,6 +1083,63 @@ func TestValidateTestSteps(t *testing.T) { errs: []error{ errors.New("test best-effort contains best_effort without timeout"), }, + }, { + name: "SA token mount path collides with credential mount path", + steps: []api.TestStep{{ + LiteralTestStep: &api.LiteralTestStep{ + As: "as", + From: "from", + Commands: "commands", + Resources: resources, + Credentials: []api.CredentialReference{{ + Namespace: "ns", Name: "cred", MountPath: "/var/run/secrets/creds", + }}, + ServiceAccountTokens: []api.ServiceAccountTokenVolume{{ + Audience: "aud", MountPath: "/var/run/secrets/creds", + }}, + }, + }}, + errs: []error{ + errors.New(`test[0].service_account_tokens[0].mount_path: collides with credentials[0] mount path "/var/run/secrets/creds"`), + }, + }, { + name: "SA token mount path under credential mount path", + steps: []api.TestStep{{ + LiteralTestStep: &api.LiteralTestStep{ + As: "as", + From: "from", + Commands: "commands", + Resources: resources, + Credentials: []api.CredentialReference{{ + Namespace: "ns", Name: "cred", MountPath: "/var/run/secrets", + }}, + ServiceAccountTokens: []api.ServiceAccountTokenVolume{{ + Audience: "aud", MountPath: "/var/run/secrets/wif", + }}, + }, + 
}}, + errs: []error{ + errors.New("test[0].service_account_tokens[0].mount_path: /var/run/secrets/wif is under credentials[0] mount path /var/run/secrets"), + }, + }, { + name: "credential mount path under SA token mount path", + steps: []api.TestStep{{ + LiteralTestStep: &api.LiteralTestStep{ + As: "as", + From: "from", + Commands: "commands", + Resources: resources, + Credentials: []api.CredentialReference{{ + Namespace: "ns", Name: "cred", MountPath: "/var/run/secrets/wif/nested", + }}, + ServiceAccountTokens: []api.ServiceAccountTokenVolume{{ + Audience: "aud", MountPath: "/var/run/secrets/wif", + }}, + }, + }}, + errs: []error{ + errors.New("test[0].service_account_tokens[0].mount_path: credentials[0] mount path /var/run/secrets/wif/nested is under /var/run/secrets/wif"), + }, }, { name: "cluster claim release", steps: []api.TestStep{{ @@ -1095,7 +1152,7 @@ func TestValidateTestSteps(t *testing.T) { clusterClaim: api.ClaimRelease{ReleaseName: "myclaim-as", OverrideName: "myclaim"}, }} { t.Run(tc.name, func(t *testing.T) { - context := newContext("test", nil, tc.releases, make(testInputImages)) + context := newContext("test", nil, tc.releases, make(testInputImages), nil) if tc.seen != nil { context.namesSeen = tc.seen } @@ -1136,7 +1193,7 @@ func TestValidatePostSteps(t *testing.T) { }}, }} { t.Run(tc.name, func(t *testing.T) { - context := newContext("test", nil, tc.releases, make(testInputImages)) + context := newContext("test", nil, tc.releases, make(testInputImages), nil) if tc.seen != nil { context.namesSeen = tc.seen } @@ -1174,7 +1231,7 @@ func TestValidateParameters(t *testing.T) { }} { t.Run(tc.name, func(t *testing.T) { v := NewValidator(nil, nil) - err := v.validateLiteralTestStep(newContext("test", tc.env, tc.releases, make(testInputImages)), testStageTest, api.LiteralTestStep{ + err := v.validateLiteralTestStep(newContext("test", tc.env, tc.releases, make(testInputImages), nil), testStageTest, api.LiteralTestStep{ As: "as", From: "from", Commands: 
"commands", diff --git a/pkg/webreg/zz_generated.ci_operator_reference.go b/pkg/webreg/zz_generated.ci_operator_reference.go index 6f711f2a6b..dc76bfcce9 100644 --- a/pkg/webreg/zz_generated.ci_operator_reference.go +++ b/pkg/webreg/zz_generated.ci_operator_reference.go @@ -845,6 +845,20 @@ const ciOperatorReferenceYaml = "# The list of base images describe\n" + " # RunAsScript defines if this step should be executed as a script mounted\n" + " # in the test container instead of being executed directly via bash\n" + " run_as_script: false\n" + + " # ServiceAccountTokens configures additional projected service account token\n" + + " # volumes with custom audiences, mounted into the step container. This is\n" + + " # useful for workloads that need to exchange tokens with external identity\n" + + " # providers (e.g., GCP Workload Identity Federation).\n" + + " service_account_tokens:\n" + + " - # Audience is the intended audience of the token. The token will only be\n" + + " # valid for recipients that identify themselves with this audience.\n" + + " audience: ' '\n" + + " # ExpirationSeconds is the requested duration of validity of the token,\n" + + " # in seconds. The kubelet will automatically rotate the token at 80% of\n" + + " # its TTL. 
Defaults to 3600 (1 hour) if not set.\n" + + " expiration_seconds: 0\n" + + " # MountPath is the path where the token will be mounted in the container.\n" + + " mount_path: ' '\n" + " # Timeout is how long the we will wait before aborting a job with SIGINT.\n" + " timeout: 0s\n" + " # Pre is the array of test steps run to set up the environment for the test.\n" + @@ -953,6 +967,20 @@ const ciOperatorReferenceYaml = "# The list of base images describe\n" + " # RunAsScript defines if this step should be executed as a script mounted\n" + " # in the test container instead of being executed directly via bash\n" + " run_as_script: false\n" + + " # ServiceAccountTokens configures additional projected service account token\n" + + " # volumes with custom audiences, mounted into the step container. This is\n" + + " # useful for workloads that need to exchange tokens with external identity\n" + + " # providers (e.g., GCP Workload Identity Federation).\n" + + " service_account_tokens:\n" + + " - # Audience is the intended audience of the token. The token will only be\n" + + " # valid for recipients that identify themselves with this audience.\n" + + " audience: ' '\n" + + " # ExpirationSeconds is the requested duration of validity of the token,\n" + + " # in seconds. The kubelet will automatically rotate the token at 80% of\n" + + " # its TTL. 
Defaults to 3600 (1 hour) if not set.\n" + + " expiration_seconds: 0\n" + + " # MountPath is the path where the token will be mounted in the container.\n" + + " mount_path: ' '\n" + " # Timeout is how long the we will wait before aborting a job with SIGINT.\n" + " timeout: 0s\n" + " # Test is the array of test steps that define the actual test.\n" + @@ -1061,6 +1089,20 @@ const ciOperatorReferenceYaml = "# The list of base images describe\n" + " # RunAsScript defines if this step should be executed as a script mounted\n" + " # in the test container instead of being executed directly via bash\n" + " run_as_script: false\n" + + " # ServiceAccountTokens configures additional projected service account token\n" + + " # volumes with custom audiences, mounted into the step container. This is\n" + + " # useful for workloads that need to exchange tokens with external identity\n" + + " # providers (e.g., GCP Workload Identity Federation).\n" + + " service_account_tokens:\n" + + " - # Audience is the intended audience of the token. The token will only be\n" + + " # valid for recipients that identify themselves with this audience.\n" + + " audience: ' '\n" + + " # ExpirationSeconds is the requested duration of validity of the token,\n" + + " # in seconds. The kubelet will automatically rotate the token at 80% of\n" + + " # its TTL. 
Defaults to 3600 (1 hour) if not set.\n" + + " expiration_seconds: 0\n" + + " # MountPath is the path where the token will be mounted in the container.\n" + + " mount_path: ' '\n" + " # Timeout is how long the we will wait before aborting a job with SIGINT.\n" + " timeout: 0s\n" + " # Override job timeout\n" + @@ -1263,6 +1305,11 @@ const ciOperatorReferenceYaml = "# The list of base images describe\n" + " # LiteralTestStep is a full test step definition.\n" + " \"\": \"\"\n" + " run_as_script: false\n" + + " service_account_tokens:\n" + + " # LiteralTestStep is a full test step definition.\n" + + " - audience: ' '\n" + + " expiration_seconds: 0\n" + + " mount_path: ' '\n" + " timeout: 0s\n" + " # Pre is the array of test steps run to set up the environment for the test.\n" + " pre:\n" + @@ -1334,6 +1381,11 @@ const ciOperatorReferenceYaml = "# The list of base images describe\n" + " # LiteralTestStep is a full test step definition.\n" + " \"\": \"\"\n" + " run_as_script: false\n" + + " service_account_tokens:\n" + + " # LiteralTestStep is a full test step definition.\n" + + " - audience: ' '\n" + + " expiration_seconds: 0\n" + + " mount_path: ' '\n" + " timeout: 0s\n" + " # Test is the array of test steps that define the actual test.\n" + " test:\n" + @@ -1405,6 +1457,11 @@ const ciOperatorReferenceYaml = "# The list of base images describe\n" + " # LiteralTestStep is a full test step definition.\n" + " \"\": \"\"\n" + " run_as_script: false\n" + + " service_account_tokens:\n" + + " # LiteralTestStep is a full test step definition.\n" + + " - audience: ' '\n" + + " expiration_seconds: 0\n" + + " mount_path: ' '\n" + " timeout: 0s\n" + " # Workflow is the name of the workflow to be used for this configuration. 
For fields defined in both\n" + " # the config and the workflow, the fields from the config will override what is set in Workflow.\n" + @@ -1777,6 +1834,20 @@ const ciOperatorReferenceYaml = "# The list of base images describe\n" + " # RunAsScript defines if this step should be executed as a script mounted\n" + " # in the test container instead of being executed directly via bash\n" + " run_as_script: false\n" + + " # ServiceAccountTokens configures additional projected service account token\n" + + " # volumes with custom audiences, mounted into the step container. This is\n" + + " # useful for workloads that need to exchange tokens with external identity\n" + + " # providers (e.g., GCP Workload Identity Federation).\n" + + " service_account_tokens:\n" + + " - # Audience is the intended audience of the token. The token will only be\n" + + " # valid for recipients that identify themselves with this audience.\n" + + " audience: ' '\n" + + " # ExpirationSeconds is the requested duration of validity of the token,\n" + + " # in seconds. The kubelet will automatically rotate the token at 80% of\n" + + " # its TTL. Defaults to 3600 (1 hour) if not set.\n" + + " expiration_seconds: 0\n" + + " # MountPath is the path where the token will be mounted in the container.\n" + + " mount_path: ' '\n" + " # Timeout is how long the we will wait before aborting a job with SIGINT.\n" + " timeout: 0s\n" + " # Pre is the array of test steps run to set up the environment for the test.\n" + @@ -1885,6 +1956,20 @@ const ciOperatorReferenceYaml = "# The list of base images describe\n" + " # RunAsScript defines if this step should be executed as a script mounted\n" + " # in the test container instead of being executed directly via bash\n" + " run_as_script: false\n" + + " # ServiceAccountTokens configures additional projected service account token\n" + + " # volumes with custom audiences, mounted into the step container. 
This is\n" + + " # useful for workloads that need to exchange tokens with external identity\n" + + " # providers (e.g., GCP Workload Identity Federation).\n" + + " service_account_tokens:\n" + + " - # Audience is the intended audience of the token. The token will only be\n" + + " # valid for recipients that identify themselves with this audience.\n" + + " audience: ' '\n" + + " # ExpirationSeconds is the requested duration of validity of the token,\n" + + " # in seconds. The kubelet will automatically rotate the token at 80% of\n" + + " # its TTL. Defaults to 3600 (1 hour) if not set.\n" + + " expiration_seconds: 0\n" + + " # MountPath is the path where the token will be mounted in the container.\n" + + " mount_path: ' '\n" + " # Timeout is how long the we will wait before aborting a job with SIGINT.\n" + " timeout: 0s\n" + " # Test is the array of test steps that define the actual test.\n" + @@ -1993,6 +2078,20 @@ const ciOperatorReferenceYaml = "# The list of base images describe\n" + " # RunAsScript defines if this step should be executed as a script mounted\n" + " # in the test container instead of being executed directly via bash\n" + " run_as_script: false\n" + + " # ServiceAccountTokens configures additional projected service account token\n" + + " # volumes with custom audiences, mounted into the step container. This is\n" + + " # useful for workloads that need to exchange tokens with external identity\n" + + " # providers (e.g., GCP Workload Identity Federation).\n" + + " service_account_tokens:\n" + + " - # Audience is the intended audience of the token. The token will only be\n" + + " # valid for recipients that identify themselves with this audience.\n" + + " audience: ' '\n" + + " # ExpirationSeconds is the requested duration of validity of the token,\n" + + " # in seconds. The kubelet will automatically rotate the token at 80% of\n" + + " # its TTL. 
Defaults to 3600 (1 hour) if not set.\n" + + " expiration_seconds: 0\n" + + " # MountPath is the path where the token will be mounted in the container.\n" + + " mount_path: ' '\n" + " # Timeout is how long the we will wait before aborting a job with SIGINT.\n" + " timeout: 0s\n" + " # Override job timeout\n" + @@ -2195,6 +2294,11 @@ const ciOperatorReferenceYaml = "# The list of base images describe\n" + " # LiteralTestStep is a full test step definition.\n" + " \"\": \"\"\n" + " run_as_script: false\n" + + " service_account_tokens:\n" + + " # LiteralTestStep is a full test step definition.\n" + + " - audience: ' '\n" + + " expiration_seconds: 0\n" + + " mount_path: ' '\n" + " timeout: 0s\n" + " # Pre is the array of test steps run to set up the environment for the test.\n" + " pre:\n" + @@ -2266,6 +2370,11 @@ const ciOperatorReferenceYaml = "# The list of base images describe\n" + " # LiteralTestStep is a full test step definition.\n" + " \"\": \"\"\n" + " run_as_script: false\n" + + " service_account_tokens:\n" + + " # LiteralTestStep is a full test step definition.\n" + + " - audience: ' '\n" + + " expiration_seconds: 0\n" + + " mount_path: ' '\n" + " timeout: 0s\n" + " # Test is the array of test steps that define the actual test.\n" + " test:\n" + @@ -2337,6 +2446,11 @@ const ciOperatorReferenceYaml = "# The list of base images describe\n" + " # LiteralTestStep is a full test step definition.\n" + " \"\": \"\"\n" + " run_as_script: false\n" + + " service_account_tokens:\n" + + " # LiteralTestStep is a full test step definition.\n" + + " - audience: ' '\n" + + " expiration_seconds: 0\n" + + " mount_path: ' '\n" + " timeout: 0s\n" + " # Workflow is the name of the workflow to be used for this configuration. For fields defined in both\n" + " # the config and the workflow, the fields from the config will override what is set in Workflow.\n" +