Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions pkg/api/constant.go
Original file line number Diff line number Diff line change
Expand Up @@ -78,6 +78,8 @@ const (
ClusterProfileSetEnv = "CLUSTER_PROFILE_SET_NAME"
ClusterProfileParam = "CLUSTER_PROFILE"
ClusterProfileSecretNameParam = "CLUSTER_PROFILE_SECRET_NAME"
STSHubRoleARNParam = "CI_STS_HUB_ROLE_ARN"
STSTargetRoleARNParam = "CI_STS_TARGET_ROLE_ARN"

// SkipCensoringLabel is the label we use to mark a secret as not needing to be censored
SkipCensoringLabel = "ci.openshift.io/skip-censoring"
Expand Down
14 changes: 8 additions & 6 deletions pkg/api/types.go
Original file line number Diff line number Diff line change
Expand Up @@ -3023,12 +3023,14 @@ func (cpl *ClusterProfilesList) Resolve() error {
type ClusterProfilesMap map[ClusterProfile]ClusterProfileDetails

type ClusterProfileDetails struct {
Name ClusterProfile `yaml:"name,omitempty" json:"name,omitempty"`
Owners []ClusterProfileOwners `yaml:"owners,omitempty" json:"owners,omitempty"`
ClusterType string `yaml:"cluster_type,omitempty" json:"cluster_type,omitempty"`
LeaseType string `yaml:"lease_type,omitempty" json:"lease_type,omitempty"`
Secret string `yaml:"secret,omitempty" json:"secret,omitempty"`
ConfigMap string `yaml:"config_map,omitempty" json:"config_map,omitempty"`
Name ClusterProfile `yaml:"name,omitempty" json:"name,omitempty"`
Owners []ClusterProfileOwners `yaml:"owners,omitempty" json:"owners,omitempty"`
ClusterType string `yaml:"cluster_type,omitempty" json:"cluster_type,omitempty"`
LeaseType string `yaml:"lease_type,omitempty" json:"lease_type,omitempty"`
Secret string `yaml:"secret,omitempty" json:"secret,omitempty"`
ConfigMap string `yaml:"config_map,omitempty" json:"config_map,omitempty"`
HubRoleARN string `yaml:"hub_role_arn,omitempty" json:"hub_role_arn,omitempty"`
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

TargetRoleARN string `yaml:"target_role_arn,omitempty" json:"target_role_arn,omitempty"`
}

type ClusterProfileKonfluxOwner struct {
Expand Down
9 changes: 9 additions & 0 deletions pkg/steps/lease.go
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,8 @@ type leaseStep struct {
clusterProfileSetName string
clusterProfileName string
clusterProfileSecretName string
stsHubRoleARN string
stsTargetRoleARN string
}

func LeaseStep(client *lease.Client, leases []api.StepLease, wrapped api.Step, namespace func() string, metricsAgent *metrics.MetricsAgent,
Expand Down Expand Up @@ -94,6 +96,10 @@ func (s *leaseStep) Provides() api.ParameterMap {
parameters[api.ClusterProfileParam] = func() (string, error) { return s.clusterProfileName, nil }
// nolint:unparam
parameters[api.ClusterProfileSecretNameParam] = func() (string, error) { return s.clusterProfileSecretName, nil }
// nolint:unparam
parameters[api.STSHubRoleARNParam] = func() (string, error) { return s.stsHubRoleARN, nil }
// nolint:unparam
parameters[api.STSTargetRoleARNParam] = func() (string, error) { return s.stsTargetRoleARN, nil }

for _, l := range s.leases {
// nolint:unparam
Expand Down Expand Up @@ -274,6 +280,9 @@ func (s *leaseStep) handleClusterProfile(ctx context.Context, l *stepLease, name
return fmt.Errorf("resolve cluster profile %s: %w", s.clusterProfileName, err)
}

s.stsHubRoleARN = cpDetails.HubRoleARN
s.stsTargetRoleARN = cpDetails.TargetRoleARN

if err := s.importClusterProfileSecret(ctx, cpDetails.Secret); err != nil {
return fmt.Errorf("import secret %s for cluster profile %s: %w", cpDetails.Secret, s.clusterProfileName, err)
}
Expand Down
77 changes: 77 additions & 0 deletions pkg/steps/lease_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -295,6 +295,8 @@ func TestAcquireLeases(t *testing.T) {
api.ClusterProfileSetEnv: "",
api.ClusterProfileParam: "",
api.ClusterProfileSecretNameParam: "",
api.STSHubRoleARNParam: "",
api.STSTargetRoleARNParam: "",
"lease-0": "res-type-0",
"lease-1": "res-type-1",
"parameter": "map",
Expand Down Expand Up @@ -341,6 +343,8 @@ func TestAcquireLeases(t *testing.T) {
api.ClusterProfileSetEnv: "",
api.ClusterProfileParam: "aws",
api.ClusterProfileSecretNameParam: "cluster-secrets-aws",
api.STSHubRoleARNParam: "",
api.STSTargetRoleARNParam: "",
api.DefaultLeaseEnv: "us-east-1",
},
wantSecrets: corev1.SecretList{
Expand Down Expand Up @@ -416,6 +420,8 @@ func TestAcquireLeases(t *testing.T) {
api.ClusterProfileSetEnv: "",
api.ClusterProfileParam: "aws",
api.ClusterProfileSecretNameParam: "cluster-secrets-aws",
api.STSHubRoleARNParam: "",
api.STSTargetRoleARNParam: "",
api.DefaultLeaseEnv: "us-east-1",
"FOOBAR_RESOURCE": "foobar-res-0",
},
Expand Down Expand Up @@ -453,6 +459,75 @@ func TestAcquireLeases(t *testing.T) {
"releaseone owner foobar-res-0 free",
},
},
{
name: "Cluster profile lease with STS",
leases: []api.StepLease{{
ResourceType: "aws",
Env: api.DefaultLeaseEnv,
Count: 1,
ClusterProfile: "aws",
}},
resources: map[string]*common.Resource{
"acquireWaitWithPriority_aws_free_leased_random": {
Name: "us-east-1--aws-quota-slice-0",
},
},
objects: []ctrlruntimeclient.Object{&corev1.Secret{
ObjectMeta: v1.ObjectMeta{
Namespace: "ci",
Name: "cluster-secrets-aws",
},
Data: map[string][]byte{
"k1": []byte("v1"),
},
}},
clusterProfiles: map[string]*api.ClusterProfileDetails{
"aws": {
Secret: "cluster-secrets-aws",
LeaseType: "aws-quota-slice",
HubRoleARN: "arn:aws:iam::111:role/hub",
TargetRoleARN: "arn:aws:iam::222:role/target",
},
},
wantProvides: map[string]string{
"parameter": "map",
api.ClusterProfileSetEnv: "",
api.ClusterProfileParam: "aws",
api.ClusterProfileSecretNameParam: "cluster-secrets-aws",
api.STSHubRoleARNParam: "arn:aws:iam::111:role/hub",
api.STSTargetRoleARNParam: "arn:aws:iam::222:role/target",
api.DefaultLeaseEnv: "us-east-1",
},
wantSecrets: corev1.SecretList{
Items: []corev1.Secret{
{
ObjectMeta: v1.ObjectMeta{
Namespace: "ci",
Name: "cluster-secrets-aws",
ResourceVersion: "999",
},
Data: map[string][]byte{
"k1": []byte("v1"),
},
},
{
ObjectMeta: v1.ObjectMeta{
Namespace: ns,
Name: "cluster-secrets-aws",
ResourceVersion: "1",
},
Data: map[string][]byte{
"k1": []byte("v1"),
},
Immutable: ptr.To(true),
},
},
},
wantCalls: []string{
"acquireWaitWithPriority owner aws free leased random",
"releaseone owner us-east-1--aws-quota-slice-0 free",
},
},
{
name: "Nested cluster profile",
leases: []api.StepLease{{
Expand Down Expand Up @@ -490,6 +565,8 @@ func TestAcquireLeases(t *testing.T) {
api.ClusterProfileSetEnv: "aws-set",
api.ClusterProfileParam: "aws",
api.ClusterProfileSecretNameParam: "cluster-secrets-aws",
api.STSHubRoleARNParam: "",
api.STSTargetRoleARNParam: "",
api.DefaultLeaseEnv: "us-east-1",
},
wantSecrets: corev1.SecretList{
Expand Down
81 changes: 71 additions & 10 deletions pkg/steps/multi_stage/gen.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@ package multi_stage

import (
"fmt"
"path/filepath"
"path"
"strings"

"github.com/sirupsen/logrus"
Expand All @@ -25,6 +25,10 @@ const (
profileVolumeName = "cluster-profile"
vpnContainerName = "vpn-client"
leaseProxyScriptsMountPath = "/opt/scripts/lease-proxy"
stsTokenVolumeName = "aws-sts-token"
stsTokenMountPath = "/var/run/secrets/aws/sts-token"
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Should we pass the value of stsTokenMountPath down to the test pod as an env variable?

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

this is consumed by the file generation, not by the tests:

	configContent := fmt.Sprintf("[profile hub]\nweb_identity_token_file = /var/run/secrets/aws/sts-token/token\nrole_arn = %s\n\n[default]\nrole_arn = %s\nsource_profile = hub\n", s.stsHubRoleARN, s.stsTargetRoleARN)

stsConfigVolumeName = "aws-sts-config"
stsConfigMountPath = "/var/run/secrets/aws/config"
)

func (s *multiStageTestStep) generateObservers(
Expand Down Expand Up @@ -223,21 +227,25 @@ func (s *multiStageTestStep) generatePods(
}
} else if needsKubeConfig {
container.Env = append(container.Env, []coreapi.EnvVar{
{Name: "KUBECONFIG", Value: filepath.Join(SecretMountPath, "kubeconfig")},
{Name: "KUBECONFIGMINIMAL", Value: filepath.Join(SecretMountPath, "kubeconfig-minimal")},
{Name: "KUBEADMIN_PASSWORD_FILE", Value: filepath.Join(SecretMountPath, "kubeadmin-password")},
{Name: "KUBECONFIG", Value: path.Join(SecretMountPath, "kubeconfig")},
{Name: "KUBECONFIGMINIMAL", Value: path.Join(SecretMountPath, "kubeconfig-minimal")},
{Name: "KUBEADMIN_PASSWORD_FILE", Value: path.Join(SecretMountPath, "kubeadmin-password")},
}...)
}
shmSize := allResources.Requests.Name(api.ShmResource, resource.BinarySI)
if !shmSize.IsZero() {
addDshmVolume(shmSize, pod, container)
}
if s.profile != "" {
if !needsKubeConfig && s.stsHubRoleARN != "" && s.stsTargetRoleARN != "" {
errs = append(errs, fmt.Errorf("step %s sets no_kubeconfig but the test has STS enabled (hub_role_arn=%s, target_role_arn=%s); STS requires kubeconfig", step.As, s.stsHubRoleARN, s.stsTargetRoleARN))
continue
}
profileSecret, err := s.profileSecretName()
if err != nil {
return nil, nil, fmt.Errorf("get profile secret name: %w", err)
}
addProfile(profileSecret, s.profile, pod)
addProfile(profileSecret, s.profile, s.stsHubRoleARN, s.stsTargetRoleARN, pod)
}
Comment on lines +248 to 249
Copy link
Copy Markdown

@coderabbitai coderabbitai Bot Apr 9, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

🧩 Analysis chain

🏁 Script executed:

# First, let's look at the file structure and find the key sections
fd pkg/steps/multi_stage/gen.go --type f

Repository: openshift/ci-tools

Length of output: 44


🏁 Script executed:

# Read the relevant sections of gen.go
cat -n pkg/steps/multi_stage/gen.go | sed -n '28,31p; 155,162p; 240,250p; 510,520p; 520,570p'

Repository: openshift/ci-tools

Length of output: 3219


🏁 Script executed:

# Also check the init.go file for setupRBAC
cat -n pkg/steps/multi_stage/init.go | grep -A 20 "func.*setupRBAC"

Repository: openshift/ci-tools

Length of output: 890


🏁 Script executed:

# Check the addProfile function signature
ast-grep --pattern 'func addProfile($_, $_, $_, $_, $_) {
  $$$
}'

Repository: openshift/ci-tools

Length of output: 44


🏁 Script executed:

# Check where needsKubeConfig is determined
rg "isKubeconfigNeeded" pkg/steps/multi_stage/gen.go -A 5 -B 2

Repository: openshift/ci-tools

Length of output: 672


🏁 Script executed:

# Check the generatePods function to understand the full flow
sed -n '100,250p' pkg/steps/multi_stage/gen.go | cat -n

Repository: openshift/ci-tools

Length of output: 7413


🏁 Script executed:

# Check if there are other places where ServiceAccountName is modified
rg "ServiceAccountName" pkg/steps/multi_stage/gen.go -B 2 -A 2

Repository: openshift/ci-tools

Length of output: 292


🏁 Script executed:

# Understand the relationship between addProfile and STS
sed -n '490,530p' pkg/steps/multi_stage/gen.go | cat -n

Repository: openshift/ci-tools

Length of output: 1633


🏁 Script executed:

# Check if a no_kubeconfig step can have s.profile set (which enables STS if ARNs are present)
rg "s\.profile|no_kubeconfig" pkg/steps/multi_stage/ -B 2 -A 2 | head -100

Repository: openshift/ci-tools

Length of output: 2971


🏁 Script executed:

# Check the setupRBAC function to confirm it creates SA named s.name
sed -n '251,280p' pkg/steps/multi_stage/init.go | cat -n

Repository: openshift/ci-tools

Length of output: 1291


STS-enabled pods can run under the wrong ServiceAccount for no_kubeconfig steps.

When a step has no_kubeconfig enabled, the pod's ServiceAccountName is set to empty string (defaults to the default SA), with automount disabled. However, if the step also has a cluster profile with STS enabled (both stsHubRoleARN and stsTargetRoleARN non-empty), the addProfile function calls addSTSVolumes, which creates a ServiceAccountTokenProjection for STS. This projection will mint a token from the default SA, not the test SA (s.name) that setupRBAC creates and configures for AWS trust. This causes an IAM trust mismatch and breaks STS authentication.

The fix is to select the test SA whenever STS is enabled, not only when kubeconfig is needed:

Proposed fix
-		needsKubeConfig := isKubeconfigNeeded(&step, genPodOpts)
-		if needsKubeConfig {
+		needsKubeConfig := isKubeconfigNeeded(&step, genPodOpts)
+		needsSTS := s.stsHubRoleARN != "" && s.stsTargetRoleARN != ""
+		if needsKubeConfig || needsSTS {
 			pod.Spec.ServiceAccountName = s.name
 		} else {
 			pod.Spec.ServiceAccountName = ""
 			no := false
 			pod.Spec.AutomountServiceAccountToken = &no
 		}

Also applies to: 515-517

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@pkg/steps/multi_stage/gen.go` around lines 244 - 245, The bug is that
STS-enabled steps with no_kubeconfig still leave the pod ServiceAccountName
empty, causing the ServiceAccountTokenProjection created by addSTSVolumes
(called from addProfile) to mint a token from the default SA instead of the test
SA that setupRBAC creates; update the logic in addProfile (and the same pattern
at the other occurrence noted) to set pod.Spec.ServiceAccountName = s.name
whenever both s.stsHubRoleARN and s.stsTargetRoleARN are non-empty (i.e., STS
enabled), regardless of no_kubeconfig, so the ServiceAccountTokenProjection uses
the test SA configured by setupRBAC.

Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I guess this concern is valid.

Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@danilo-gemoli, agreed — let me plan the fix.

Fix: use test ServiceAccount for STS-enabled pods regardless of no_kubeconfig

  • In generatePods, introduce a needsSTS guard alongside needsKubeConfig
  • When either is true, keep pod.Spec.ServiceAccountName = s.name and leave automount alone
  • This ensures the ServiceAccountTokenProjection added by addSTSVolumes mints a token from the test SA that setupRBAC has wired to the IAM trust policy

Failed to handle agent chat message. Please try again.

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

this has been conditioned now

Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Seems like the humans are having a chat. I'll hop back into my burrow for now. If you need me again, just tag @coderabbitai in a new comment, and I'll come hopping out!

if step.Cli != "" {
dependency := api.StepDependency{Name: fmt.Sprintf("%s:cli", api.ReleaseStreamFor(step.Cli))}
Expand Down Expand Up @@ -286,7 +294,7 @@ func isKubeconfigNeeded(step *api.LiteralTestStep, opts *generatePodOptions) boo
func addSecretWrapper(pod *coreapi.Pod, vpnConf *vpnConf, skipKubeconfig bool, genPodOpts *generatePodOptions) {
volume := "entrypoint-wrapper"
dir := "/tmp/entrypoint-wrapper"
bin := filepath.Join(dir, "entrypoint-wrapper")
bin := path.Join(dir, "entrypoint-wrapper")
pod.Spec.Volumes = append(pod.Spec.Volumes, coreapi.Volume{
Name: volume,
VolumeSource: coreapi.VolumeSource{
Expand Down Expand Up @@ -448,10 +456,10 @@ func getClusterClaimPodParams(secretVolumeMounts []coreapi.VolumeMount, testName
foundMountPath = true
retMount = append(retMount, secretVolumeMount)
if secretName == api.HiveAdminKubeconfigSecret {
retEnv = append(retEnv, coreapi.EnvVar{Name: "KUBECONFIG", Value: filepath.Join(secretVolumeMount.MountPath, api.HiveAdminKubeconfigSecretKey)})
retEnv = append(retEnv, coreapi.EnvVar{Name: "KUBECONFIG", Value: path.Join(secretVolumeMount.MountPath, api.HiveAdminKubeconfigSecretKey)})
}
if secretName == api.HiveAdminPasswordSecret {
retEnv = append(retEnv, coreapi.EnvVar{Name: "KUBEADMIN_PASSWORD_FILE", Value: filepath.Join(secretVolumeMount.MountPath, api.HiveAdminPasswordSecretKey)})
retEnv = append(retEnv, coreapi.EnvVar{Name: "KUBEADMIN_PASSWORD_FILE", Value: path.Join(secretVolumeMount.MountPath, api.HiveAdminPasswordSecretKey)})
}
break
}
Expand Down Expand Up @@ -483,7 +491,7 @@ func addDshmVolume(shmSize *resource.Quantity, pod *coreapi.Pod, container *core
})
}

func addProfile(name string, profile api.ClusterProfile, pod *coreapi.Pod) {
func addProfile(name string, profile api.ClusterProfile, hubRoleARN, targetRoleARN string, pod *coreapi.Pod) {
pod.Spec.Volumes = append(pod.Spec.Volumes, coreapi.Volume{
Name: profileVolumeName,
VolumeSource: coreapi.VolumeSource{
Expand All @@ -507,6 +515,59 @@ func addProfile(name string, profile api.ClusterProfile, pod *coreapi.Pod) {
Name: ClusterProfileMountEnv,
Value: ClusterProfileMountPath,
}}...)

if hubRoleARN != "" && targetRoleARN != "" {
addSTSVolumes(pod)
}
}

// addSTSVolumes wires AWS STS web-identity authentication into the pod:
// it adds a projected service-account token volume (audience
// "sts.amazonaws.com") and a ConfigMap-backed AWS config volume, mounts
// both read-only into the first container, and points the AWS SDK at the
// mounted config file via environment variables.
func addSTSVolumes(pod *coreapi.Pod) {
	// Token lifetime for the projected service-account token (24h).
	tokenTTL := int64(86400)

	pod.Spec.Volumes = append(pod.Spec.Volumes, coreapi.Volume{
		Name: stsTokenVolumeName,
		VolumeSource: coreapi.VolumeSource{
			Projected: &coreapi.ProjectedVolumeSource{
				Sources: []coreapi.VolumeProjection{{
					ServiceAccountToken: &coreapi.ServiceAccountTokenProjection{
						Audience:          "sts.amazonaws.com",
						ExpirationSeconds: &tokenTTL,
						Path:              "token",
					},
				}},
			},
		},
	})

	// The AWS config ConfigMap is named after the test this pod belongs to.
	configMap := stsConfigMapName(pod.Labels[MultiStageTestLabel])
	pod.Spec.Volumes = append(pod.Spec.Volumes, coreapi.Volume{
		Name: stsConfigVolumeName,
		VolumeSource: coreapi.VolumeSource{
			ConfigMap: &coreapi.ConfigMapVolumeSource{
				LocalObjectReference: coreapi.LocalObjectReference{Name: configMap},
			},
		},
	})

	// Only the first (test) container receives the mounts and AWS env vars.
	c := &pod.Spec.Containers[0]
	c.VolumeMounts = append(c.VolumeMounts,
		coreapi.VolumeMount{
			Name:      stsTokenVolumeName,
			MountPath: stsTokenMountPath,
			ReadOnly:  true,
		},
		coreapi.VolumeMount{
			Name:      stsConfigVolumeName,
			MountPath: stsConfigMountPath,
			ReadOnly:  true,
		},
	)
	c.Env = append(c.Env,
		coreapi.EnvVar{Name: "AWS_CONFIG_FILE", Value: path.Join(stsConfigMountPath, "config")},
		coreapi.EnvVar{Name: "AWS_SDK_LOAD_CONFIG", Value: "1"},
	)
}

func addCliInjector(imagestream string, pod *coreapi.Pod) {
Expand All @@ -527,7 +588,7 @@ func addCliInjector(imagestream string, pod *coreapi.Pod) {
// this line to pick appropriate oc version (i.e. oc.rhel9).
// Additionally, we need to check the existence of path because releases < 4.15 does not have oc.rhel8,
// and we fall back to old path due to backwards compatibility.
Args: []string{"-c", fmt.Sprintf("ARCH=$(uname -m | sed 's/x86_64/amd64/;s/aarch64/arm64/'); if [[ -e /usr/share/openshift/linux_${ARCH}/oc.rhel8 ]]; then /bin/cp /usr/share/openshift/linux_${ARCH}/oc.rhel8 %s; else /bin/cp /usr/share/openshift/linux_${ARCH}/oc %s; fi", filepath.Join(CliMountPath, "oc"), CliMountPath)},
Args: []string{"-c", fmt.Sprintf("ARCH=$(uname -m | sed 's/x86_64/amd64/;s/aarch64/arm64/'); if [[ -e /usr/share/openshift/linux_${ARCH}/oc.rhel8 ]]; then /bin/cp /usr/share/openshift/linux_${ARCH}/oc.rhel8 %s; else /bin/cp /usr/share/openshift/linux_${ARCH}/oc %s; fi", path.Join(CliMountPath, "oc"), CliMountPath)},
VolumeMounts: []coreapi.VolumeMount{{
Name: volumeName,
MountPath: CliMountPath,
Expand Down
34 changes: 34 additions & 0 deletions pkg/steps/multi_stage/gen_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -72,6 +72,8 @@ func TestGeneratePods(t *testing.T) {
secretVolumeMounts []coreapi.VolumeMount
leaseProxyServerAvailable bool
paramsFunc func() api.Parameters
stsHubRoleARN string
stsTargetRoleARN string
}{
{
name: "generate pods",
Expand Down Expand Up @@ -161,6 +163,36 @@ func TestGeneratePods(t *testing.T) {
},
leaseProxyServerAvailable: true,
},
{
name: "STS volumes",
config: &api.ReleaseBuildConfiguration{
Tests: []api.TestStepConfiguration{{
As: "e2e-aws",
MultiStageTestConfigurationLiteral: &api.MultiStageTestConfigurationLiteral{
ClusterProfile: api.ClusterProfileAWS,
Test: []api.LiteralTestStep{{
As: "step0", From: "src", Commands: "command0",
Timeout: &prowapi.Duration{Duration: time.Hour},
GracePeriod: &prowapi.Duration{Duration: 20 * time.Second},
}},
}},
},
},
env: []coreapi.EnvVar{
{Name: "RELEASE_IMAGE_INITIAL", Value: "release:initial"},
{Name: "RELEASE_IMAGE_LATEST", Value: "release:latest"},
{Name: "LEASED_RESOURCE", Value: "uuid"},
},
paramsFunc: func() api.Parameters {
params := api.NewDeferredParameters(nil)
params.Add(api.ClusterProfileSecretNameParam, func() (string, error) {
return "cluster-secrets-aws-5", nil
})
return params
},
stsHubRoleARN: "arn:aws:iam::111111111111:role/ci-step-runner",
stsTargetRoleARN: "arn:aws:iam::222222222222:role/ci-profile-aws-5",
},
} {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
Expand All @@ -172,6 +204,8 @@ func TestGeneratePods(t *testing.T) {

js := jobSpec()
step := newMultiStageTestStep(tc.config.Tests[0], tc.config, params, nil, &js, nil, "node-name", "", nil, false, nil, tc.leaseProxyServerAvailable, wait.Backoff{})
step.stsHubRoleARN = tc.stsHubRoleARN
step.stsTargetRoleARN = tc.stsTargetRoleARN
step.test[0].Resources = resourceRequirements

ret, _, err := step.generatePods(tc.config.Tests[0].MultiStageTestConfigurationLiteral.Test, tc.env, tc.secretVolumes, tc.secretVolumeMounts, nil)
Expand Down
Loading