Fix issue DPTP-4756 add STS hub-account role chaining for AWS cluster profiles #5094

Changes from all commits (base: main) — pkg/steps/multi_stage/gen.go:
```diff
@@ -2,7 +2,7 @@ package multi_stage
 
 import (
 	"fmt"
-	"path/filepath"
+	"path"
 	"strings"
 
 	"github.com/sirupsen/logrus"
```
```diff
@@ -25,6 +25,10 @@ const (
 	profileVolumeName          = "cluster-profile"
 	vpnContainerName           = "vpn-client"
 	leaseProxyScriptsMountPath = "/opt/scripts/lease-proxy"
+	stsTokenVolumeName         = "aws-sts-token"
+	stsTokenMountPath          = "/var/run/secrets/aws/sts-token"
+	stsConfigVolumeName        = "aws-sts-config"
+	stsConfigMountPath         = "/var/run/secrets/aws/config"
 )
 
 func (s *multiStageTestStep) generateObservers(
```

A review thread on the new constants:

**Contributor:** Should we pass the value of …

**Contributor (author):** this is consumed by the file generation, not by the tests: …
```diff
@@ -223,21 +227,25 @@ func (s *multiStageTestStep) generatePods(
 			}
 		} else if needsKubeConfig {
 			container.Env = append(container.Env, []coreapi.EnvVar{
-				{Name: "KUBECONFIG", Value: filepath.Join(SecretMountPath, "kubeconfig")},
-				{Name: "KUBECONFIGMINIMAL", Value: filepath.Join(SecretMountPath, "kubeconfig-minimal")},
-				{Name: "KUBEADMIN_PASSWORD_FILE", Value: filepath.Join(SecretMountPath, "kubeadmin-password")},
+				{Name: "KUBECONFIG", Value: path.Join(SecretMountPath, "kubeconfig")},
+				{Name: "KUBECONFIGMINIMAL", Value: path.Join(SecretMountPath, "kubeconfig-minimal")},
+				{Name: "KUBEADMIN_PASSWORD_FILE", Value: path.Join(SecretMountPath, "kubeadmin-password")},
 			}...)
 		}
 		shmSize := allResources.Requests.Name(api.ShmResource, resource.BinarySI)
 		if !shmSize.IsZero() {
 			addDshmVolume(shmSize, pod, container)
 		}
 		if s.profile != "" {
+			if !needsKubeConfig && s.stsHubRoleARN != "" && s.stsTargetRoleARN != "" {
+				errs = append(errs, fmt.Errorf("step %s sets no_kubeconfig but the test has STS enabled (hub_role_arn=%s, target_role_arn=%s); STS requires kubeconfig", step.As, s.stsHubRoleARN, s.stsTargetRoleARN))
+				continue
+			}
 			profileSecret, err := s.profileSecretName()
 			if err != nil {
 				return nil, nil, fmt.Errorf("get profile secret name: %w", err)
 			}
-			addProfile(profileSecret, s.profile, pod)
+			addProfile(profileSecret, s.profile, s.stsHubRoleARN, s.stsTargetRoleARN, pod)
 		}
```
|
**Automated review — comment on lines +248 to 249:**

🧩 Analysis chain — scripts executed against the repository (outputs collapsed):

```sh
# First, let's look at the file structure and find the key sections
fd pkg/steps/multi_stage/gen.go --type f

# Read the relevant sections of gen.go
cat -n pkg/steps/multi_stage/gen.go | sed -n '28,31p; 155,162p; 240,250p; 510,520p; 520,570p'

# Also check the init.go file for setupRBAC
cat -n pkg/steps/multi_stage/init.go | grep -A 20 "func.*setupRBAC"

# Check the addProfile function signature
ast-grep --pattern 'func addProfile($_, $_, $_, $_, $_) {
  $$$
}'

# Check where needsKubeConfig is determined
rg "isKubeconfigNeeded" pkg/steps/multi_stage/gen.go -A 5 -B 2

# Check the generatePods function to understand the full flow
sed -n '100,250p' pkg/steps/multi_stage/gen.go | cat -n

# Check if there are other places where ServiceAccountName is modified
rg "ServiceAccountName" pkg/steps/multi_stage/gen.go -B 2 -A 2

# Understand the relationship between addProfile and STS
sed -n '490,530p' pkg/steps/multi_stage/gen.go | cat -n

# Check if a no_kubeconfig step can have s.profile set (which enables STS if ARNs are present)
rg "s\.profile|no_kubeconfig" pkg/steps/multi_stage/ -B 2 -A 2 | head -100

# Check the setupRBAC function to confirm it creates SA named s.name
sed -n '251,280p' pkg/steps/multi_stage/init.go | cat -n
```

**STS-enabled pods can run under the wrong ServiceAccount for `no_kubeconfig` steps.** When a step has `no_kubeconfig` set, the generated pod gets an empty `ServiceAccountName` and token automounting disabled, so the projected STS token is not issued for the intended test ServiceAccount. The fix is to select the test SA whenever STS is enabled, not only when kubeconfig is needed.

Proposed fix:

```diff
-		needsKubeConfig := isKubeconfigNeeded(&step, genPodOpts)
-		if needsKubeConfig {
+		needsKubeConfig := isKubeconfigNeeded(&step, genPodOpts)
+		needsSTS := s.stsHubRoleARN != "" && s.stsTargetRoleARN != ""
+		if needsKubeConfig || needsSTS {
 			pod.Spec.ServiceAccountName = s.name
 		} else {
 			pod.Spec.ServiceAccountName = ""
 			no := false
 			pod.Spec.AutomountServiceAccountToken = &no
 		}
```

Also applies to: 515-517

**Contributor:** I guess this concern is valid.

**Contributor:** Fix: use test ServiceAccount for STS-enabled pods regardless of `no_kubeconfig`.
```diff
 
 		if step.Cli != "" {
 			dependency := api.StepDependency{Name: fmt.Sprintf("%s:cli", api.ReleaseStreamFor(step.Cli))}
```
|
|
```diff
@@ -286,7 +294,7 @@ func isKubeconfigNeeded(step *api.LiteralTestStep, opts *generatePodOptions) boo
 func addSecretWrapper(pod *coreapi.Pod, vpnConf *vpnConf, skipKubeconfig bool, genPodOpts *generatePodOptions) {
 	volume := "entrypoint-wrapper"
 	dir := "/tmp/entrypoint-wrapper"
-	bin := filepath.Join(dir, "entrypoint-wrapper")
+	bin := path.Join(dir, "entrypoint-wrapper")
 	pod.Spec.Volumes = append(pod.Spec.Volumes, coreapi.Volume{
 		Name: volume,
 		VolumeSource: coreapi.VolumeSource{
```
|
|
```diff
@@ -448,10 +456,10 @@ func getClusterClaimPodParams(secretVolumeMounts []coreapi.VolumeMount, testName
 			foundMountPath = true
 			retMount = append(retMount, secretVolumeMount)
 			if secretName == api.HiveAdminKubeconfigSecret {
-				retEnv = append(retEnv, coreapi.EnvVar{Name: "KUBECONFIG", Value: filepath.Join(secretVolumeMount.MountPath, api.HiveAdminKubeconfigSecretKey)})
+				retEnv = append(retEnv, coreapi.EnvVar{Name: "KUBECONFIG", Value: path.Join(secretVolumeMount.MountPath, api.HiveAdminKubeconfigSecretKey)})
 			}
 			if secretName == api.HiveAdminPasswordSecret {
-				retEnv = append(retEnv, coreapi.EnvVar{Name: "KUBEADMIN_PASSWORD_FILE", Value: filepath.Join(secretVolumeMount.MountPath, api.HiveAdminPasswordSecretKey)})
+				retEnv = append(retEnv, coreapi.EnvVar{Name: "KUBEADMIN_PASSWORD_FILE", Value: path.Join(secretVolumeMount.MountPath, api.HiveAdminPasswordSecretKey)})
 			}
 			break
 		}
```
|
|
```diff
@@ -483,7 +491,7 @@ func addDshmVolume(shmSize *resource.Quantity, pod *coreapi.Pod, container *core
 	})
 }
 
-func addProfile(name string, profile api.ClusterProfile, pod *coreapi.Pod) {
+func addProfile(name string, profile api.ClusterProfile, hubRoleARN, targetRoleARN string, pod *coreapi.Pod) {
 	pod.Spec.Volumes = append(pod.Spec.Volumes, coreapi.Volume{
 		Name: profileVolumeName,
 		VolumeSource: coreapi.VolumeSource{
```
|
|
```diff
@@ -507,6 +515,59 @@ func addProfile(name string, profile api.ClusterProfile, pod *coreapi.Pod) {
 		Name:  ClusterProfileMountEnv,
 		Value: ClusterProfileMountPath,
 	}}...)
+
+	if hubRoleARN != "" && targetRoleARN != "" {
+		addSTSVolumes(pod)
+	}
 }
 
+func addSTSVolumes(pod *coreapi.Pod) {
+	expirationSeconds := int64(86400)
+
+	pod.Spec.Volumes = append(pod.Spec.Volumes, coreapi.Volume{
+		Name: stsTokenVolumeName,
+		VolumeSource: coreapi.VolumeSource{
+			Projected: &coreapi.ProjectedVolumeSource{
+				Sources: []coreapi.VolumeProjection{{
+					ServiceAccountToken: &coreapi.ServiceAccountTokenProjection{
+						Audience:          "sts.amazonaws.com",
+						ExpirationSeconds: &expirationSeconds,
+						Path:              "token",
+					},
+				}},
+			},
+		},
+	})
+
+	stsConfigCMName := stsConfigMapName(pod.Labels[MultiStageTestLabel])
+	pod.Spec.Volumes = append(pod.Spec.Volumes, coreapi.Volume{
+		Name: stsConfigVolumeName,
+		VolumeSource: coreapi.VolumeSource{
+			ConfigMap: &coreapi.ConfigMapVolumeSource{
+				LocalObjectReference: coreapi.LocalObjectReference{
+					Name: stsConfigCMName,
+				},
+			},
+		},
+	})
+
+	container := &pod.Spec.Containers[0]
+	container.VolumeMounts = append(container.VolumeMounts,
+		coreapi.VolumeMount{
+			Name:      stsTokenVolumeName,
+			MountPath: stsTokenMountPath,
+			ReadOnly:  true,
+		},
+		coreapi.VolumeMount{
+			Name:      stsConfigVolumeName,
+			MountPath: stsConfigMountPath,
+			ReadOnly:  true,
+		},
+	)
+	container.Env = append(container.Env,
+		coreapi.EnvVar{Name: "AWS_CONFIG_FILE", Value: path.Join(stsConfigMountPath, "config")},
+		coreapi.EnvVar{Name: "AWS_SDK_LOAD_CONFIG", Value: "1"},
+	)
+}
 
 func addCliInjector(imagestream string, pod *coreapi.Pod) {
```
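Taken together, `addSTSVolumes` wires two inputs into the first container: a projected ServiceAccount token with the `sts.amazonaws.com` audience at `/var/run/secrets/aws/sts-token/token`, and a generated AWS config file at the path `AWS_CONFIG_FILE` points to. The config content itself comes from the ConfigMap named by `stsConfigMapName`, which is produced outside this diff; assuming it follows the standard AWS shared-config conventions for role chaining, it would look roughly like this (profile names and account IDs below are illustrative, not taken from the PR):

```ini
# Hypothetical sketch of the generated config; the real content comes from
# the aws-sts-config ConfigMap and may differ in shape.

# Hub-account role, assumed with the projected OIDC token
# (AssumeRoleWithWebIdentity).
[profile hub]
role_arn = arn:aws:iam::111111111111:role/ci-hub-role
web_identity_token_file = /var/run/secrets/aws/sts-token/token

# Target role in the test account, role-chained from the hub credentials
# (AssumeRole with source_profile).
[default]
role_arn = arn:aws:iam::222222222222:role/ci-target-role
source_profile = hub
```

`AWS_SDK_LOAD_CONFIG=1` is what makes older SDKs (notably the Go SDK v1) honor the shared config file at all; newer SDKs load it by default.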
|
|
```diff
@@ -527,7 +588,7 @@ func addCliInjector(imagestream string, pod *coreapi.Pod) {
 			// this line to pick appropriate oc version (i.e. oc.rhel9).
 			// Additionally, we need to check the existence of path because releases < 4.15 does not have oc.rhel8,
 			// and we fall back to old path due to backwards compatibility.
-			Args: []string{"-c", fmt.Sprintf("ARCH=$(uname -m | sed 's/x86_64/amd64/;s/aarch64/arm64/'); if [[ -e /usr/share/openshift/linux_${ARCH}/oc.rhel8 ]]; then /bin/cp /usr/share/openshift/linux_${ARCH}/oc.rhel8 %s; else /bin/cp /usr/share/openshift/linux_${ARCH}/oc %s; fi", filepath.Join(CliMountPath, "oc"), CliMountPath)},
+			Args: []string{"-c", fmt.Sprintf("ARCH=$(uname -m | sed 's/x86_64/amd64/;s/aarch64/arm64/'); if [[ -e /usr/share/openshift/linux_${ARCH}/oc.rhel8 ]]; then /bin/cp /usr/share/openshift/linux_${ARCH}/oc.rhel8 %s; else /bin/cp /usr/share/openshift/linux_${ARCH}/oc %s; fi", path.Join(CliMountPath, "oc"), CliMountPath)},
 			VolumeMounts: []coreapi.VolumeMount{{
 				Name:      volumeName,
 				MountPath: CliMountPath,
```
|
**Reviewer:** This is fine; we should modify /ci-operator/step-registry/cluster-profiles/cluster-profiles-config.yaml then.
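As a closing sanity check, the role chain can be verified from inside a test step once these mounts are in place. A minimal sketch, assuming the image has the AWS Go SDK v2 available (not something this PR guarantees) and the `AWS_CONFIG_FILE` environment set up by `addSTSVolumes`:

```go
// Minimal sketch: confirm that credentials resolved from the mounted config
// end up as the target role. Assumes aws-sdk-go-v2 is present in the image.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	// LoadDefaultConfig reads AWS_CONFIG_FILE and walks the credential chain:
	// web-identity token -> hub role -> target role.
	cfg, err := config.LoadDefaultConfig(context.Background())
	if err != nil {
		log.Fatalf("load AWS config: %v", err)
	}
	out, err := sts.NewFromConfig(cfg).GetCallerIdentity(context.Background(), &sts.GetCallerIdentityInput{})
	if err != nil {
		log.Fatalf("get caller identity: %v", err)
	}
	// If chaining worked, this prints the assumed-role ARN of the target
	// role rather than the hub role.
	fmt.Println("caller:", *out.Arn)
}
```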