diff --git a/apis/rds/v1alpha1/custom_types.go b/apis/rds/v1alpha1/custom_types.go index 12736d0e09..9a428eb705 100644 --- a/apis/rds/v1alpha1/custom_types.go +++ b/apis/rds/v1alpha1/custom_types.go @@ -676,6 +676,72 @@ type CustomDBInstanceParameters struct { // +optional SkipFinalSnapshot bool `json:"skipFinalSnapshot,omitempty"` + // The identifier of the Multi-AZ DB cluster that will act as the source for + // the read replica. Each DB cluster can have up to 15 read replicas. + // + // Constraints: + // + // * Must be the identifier of an existing Multi-AZ DB cluster. + // + // * Can't be specified if the SourceDBInstanceIdentifier parameter is also + // specified. + // + // * The specified DB cluster must have automatic backups enabled, that is, + // its backup retention period must be greater than 0. + // + // * The source DB cluster must be in the same Amazon Web Services Region + // as the read replica. Cross-Region replication isn't supported. + // +immutable + // +crossplane:generate:reference:type=github.com/crossplane-contrib/provider-aws/apis/rds/v1alpha1.DBCluster + ReplicateSourceDBClusterID *string `json:"replicateSourceDBClusterID,omitempty"` + + // ReplicateSourceDBClusterIDRef is a reference to a DBCluster used to set + // ReplicateSourceDBClusterID. + // +optional + ReplicateSourceDBClusterIDRef *xpv1.Reference `json:"replicateSourceDBClusterIDRef,omitempty"` + + // ReplicateSourceDBClusterIDSelector selects a reference to a DBCluster used to + // set ReplicateSourceDBClusterID. + // +optional + ReplicateSourceDBClusterIDSelector *xpv1.Selector `json:"replicateSourceDBClusterIDSelector,omitempty"` + + // The identifier of the DB instance that will act as the source for the read + // replica. Each DB instance can have up to 15 read replicas, with the exception of + // Oracle and SQL Server, which can have up to five. 
+ // + // Constraints: + // + // - Must be the identifier of an existing Db2, MariaDB, MySQL, Oracle, + // PostgreSQL, or SQL Server DB instance. + // + // * Can't be specified if the SourceDBClusterIdentifier parameter is also + // specified. + // + // - For the limitations of Oracle read replicas, see [Version and licensing considerations for RDS for Oracle replicas]in the Amazon RDS User + // Guide. + // + // - For the limitations of SQL Server read replicas, see [Read replica limitations with SQL Server]in the Amazon RDS User + // Guide. + // + // - The specified DB instance must have automatic backups enabled, that is, its + // backup retention period must be greater than 0. + // + // [Read replica limitations with SQL Server]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/SQLServer.ReadReplicas.html#SQLServer.ReadReplicas.Limitations + // [Version and licensing considerations for RDS for Oracle replicas]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/oracle-read-replicas.limitations.html#oracle-read-replicas.limitations.versions-and-licenses + // +immutable + // +crossplane:generate:reference:type=github.com/crossplane-contrib/provider-aws/apis/rds/v1alpha1.DBInstance + ReplicateSourceDBInstanceID *string `json:"replicateSourceDBInstanceID,omitempty"` + + // ReplicateSourceDBInstanceIDRef is a reference to a DBInstance used to set + // ReplicateSourceDBInstanceID. + // +optional + ReplicateSourceDBInstanceIDRef *xpv1.Reference `json:"replicateSourceDBInstanceIDRef,omitempty"` + + // ReplicateSourceDBInstanceIDSelector selects a reference to a DBInstance used to + // set ReplicateSourceDBInstanceID. + // +optional + ReplicateSourceDBInstanceIDSelector *xpv1.Selector `json:"replicateSourceDBInstanceIDSelector,omitempty"` + // A list of Amazon EC2 VPC security groups to authorize on this DB instance. // This change is asynchronously applied as soon as possible. // @@ -741,10 +807,21 @@ type CustomDBInstanceParameters struct { // deleted. 
// +optional DeleteAutomatedBackups *bool `json:"deleteAutomatedBackups,omitempty"` + + // TagIgnorePrefixes defines a list of tag key prefixes that should be ignored + // during tag comparison and updates by the reconciler. + // +optional + TagIgnorePrefixes []string `json:"tagIgnorePrefixes,omitempty"` } // CustomDBInstanceObservation includes the custom status fields of DBInstance. -type CustomDBInstanceObservation struct{} +type CustomDBInstanceObservation struct { + // AWS API calls don't return any field which explicitly indicates the role of database, which would be really convenient. + // DatabaseRole works on the similar principle as the Role field in AWS UI("Aurora and RDS" > "Databases"). + + // The database role may be Standalone, Primary or Replica. + DatabaseRole *string `json:"databaseRole,omitempty"` +} // CustomDBInstanceRoleAssociationParameters are custom parameters for the DBInstanceRoleAssociation type CustomDBInstanceRoleAssociationParameters struct { diff --git a/apis/rds/v1alpha1/zz_generated.deepcopy.go b/apis/rds/v1alpha1/zz_generated.deepcopy.go index 035c7e9938..4e64d876f1 100644 --- a/apis/rds/v1alpha1/zz_generated.deepcopy.go +++ b/apis/rds/v1alpha1/zz_generated.deepcopy.go @@ -606,6 +606,11 @@ func (in *CustomDBEngineVersionAMI) DeepCopy() *CustomDBEngineVersionAMI { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CustomDBInstanceObservation) DeepCopyInto(out *CustomDBInstanceObservation) { *out = *in + if in.DatabaseRole != nil { + in, out := &in.DatabaseRole, &out.DatabaseRole + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomDBInstanceObservation. 
@@ -676,6 +681,36 @@ func (in *CustomDBInstanceParameters) DeepCopyInto(out *CustomDBInstanceParamete *out = new(v1.Selector) (*in).DeepCopyInto(*out) } + if in.ReplicateSourceDBClusterID != nil { + in, out := &in.ReplicateSourceDBClusterID, &out.ReplicateSourceDBClusterID + *out = new(string) + **out = **in + } + if in.ReplicateSourceDBClusterIDRef != nil { + in, out := &in.ReplicateSourceDBClusterIDRef, &out.ReplicateSourceDBClusterIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ReplicateSourceDBClusterIDSelector != nil { + in, out := &in.ReplicateSourceDBClusterIDSelector, &out.ReplicateSourceDBClusterIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ReplicateSourceDBInstanceID != nil { + in, out := &in.ReplicateSourceDBInstanceID, &out.ReplicateSourceDBInstanceID + *out = new(string) + **out = **in + } + if in.ReplicateSourceDBInstanceIDRef != nil { + in, out := &in.ReplicateSourceDBInstanceIDRef, &out.ReplicateSourceDBInstanceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ReplicateSourceDBInstanceIDSelector != nil { + in, out := &in.ReplicateSourceDBInstanceIDSelector, &out.ReplicateSourceDBInstanceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } if in.VPCSecurityGroupIDs != nil { in, out := &in.VPCSecurityGroupIDs, &out.VPCSecurityGroupIDs *out = make([]string, len(*in)) @@ -728,6 +763,11 @@ func (in *CustomDBInstanceParameters) DeepCopyInto(out *CustomDBInstanceParamete *out = new(bool) **out = **in } + if in.TagIgnorePrefixes != nil { + in, out := &in.TagIgnorePrefixes, &out.TagIgnorePrefixes + *out = make([]string, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomDBInstanceParameters. 
@@ -3556,7 +3596,7 @@ func (in *DBInstanceObservation) DeepCopyInto(out *DBInstanceObservation) { } } } - out.CustomDBInstanceObservation = in.CustomDBInstanceObservation + in.CustomDBInstanceObservation.DeepCopyInto(&out.CustomDBInstanceObservation) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DBInstanceObservation. diff --git a/examples/rds/db-instance-read-replica.yaml b/examples/rds/db-instance-read-replica.yaml new file mode 100644 index 0000000000..99906f2c5d --- /dev/null +++ b/examples/rds/db-instance-read-replica.yaml @@ -0,0 +1,35 @@ +--- +apiVersion: rds.aws.crossplane.io/v1alpha1 +kind: DBInstance +metadata: + name: example-read-replica-1 +spec: + forProvider: + region: eu-central-1 + replicateSourceDBInstanceID: example-dbinstance + engine: mariadb + dbInstanceClass: db.t3.micro + autoMinorVersionUpgrade: true + allowMajorVersionUpgrade: true # unset per default (Note: supported dbInstanceClass and dbParameterGroup with correct dbParameterGroupFamily needed, before majorVersion upgrade possible; applyImmediately matters) + applyImmediately: true + skipFinalSnapshot: true +# masterUserPasswordSecretRef: +# key: password +# name: replica-test +# namespace: default + writeConnectionSecretToRef: + name: example-read-replica-1 + namespace: default + providerConfigRef: + name: provider-aws + +# By default, the read replica has the master user credentials as the source database. 
+#--- +#apiVersion: v1 +#kind: Secret +#metadata: +# name: replica-test +# namespace: default +#type: Opaque +#data: +# password: dGVzdFBhc3N3b3JkITEyMw== # testPassword!123 diff --git a/package/crds/rds.aws.crossplane.io_dbinstances.yaml b/package/crds/rds.aws.crossplane.io_dbinstances.yaml index 0fcbee3742..0e179cf4a3 100644 --- a/package/crds/rds.aws.crossplane.io_dbinstances.yaml +++ b/package/crds/rds.aws.crossplane.io_dbinstances.yaml @@ -1635,6 +1635,206 @@ spec: region: description: Region is which region the DBInstance will be created. type: string + replicateSourceDBClusterID: + description: |- + The identifier of the Multi-AZ DB cluster that will act as the source for + the read replica. Each DB cluster can have up to 15 read replicas. + + Constraints: + + * Must be the identifier of an existing Multi-AZ DB cluster. + + * Can't be specified if the SourceDBInstanceIdentifier parameter is also + specified. + + * The specified DB cluster must have automatic backups enabled, that is, + its backup retention period must be greater than 0. + + * The source DB cluster must be in the same Amazon Web Services Region + as the read replica. Cross-Region replication isn't supported. + type: string + replicateSourceDBClusterIDRef: + description: |- + ReplicateSourceDBClusterIDRef is a reference to a DBCluster used to set + ReplicateSourceDBClusterID. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + replicateSourceDBClusterIDSelector: + description: |- + ReplicateSourceDBClusterIDSelector selects a reference to a DBCluster used to + set ReplicateSourceDBClusterID. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + replicateSourceDBInstanceID: + description: |- + The identifier of the DB instance that will act as the source for the read + replica. Each DB instance can have up to 15 read replicas, with the exception of + Oracle and SQL Server, which can have up to five. + + Constraints: + + - Must be the identifier of an existing Db2, MariaDB, MySQL, Oracle, + PostgreSQL, or SQL Server DB instance. 
+ + * Can't be specified if the SourceDBClusterIdentifier parameter is also + specified. + + - For the limitations of Oracle read replicas, see [Version and licensing considerations for RDS for Oracle replicas]in the Amazon RDS User + Guide. + + - For the limitations of SQL Server read replicas, see [Read replica limitations with SQL Server]in the Amazon RDS User + Guide. + + - The specified DB instance must have automatic backups enabled, that is, its + backup retention period must be greater than 0. + + [Read replica limitations with SQL Server]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/SQLServer.ReadReplicas.html#SQLServer.ReadReplicas.Limitations + [Version and licensing considerations for RDS for Oracle replicas]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/oracle-read-replicas.limitations.html#oracle-read-replicas.limitations.versions-and-licenses + type: string + replicateSourceDBInstanceIDRef: + description: |- + ReplicateSourceDBInstanceIDRef is a reference to a DBInstance used to set + ReplicateSourceDBInstanceID. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + replicateSourceDBInstanceIDSelector: + description: |- + ReplicateSourceDBInstanceIDSelector selects a reference to a DBInstance used to + set ReplicateSourceDBInstanceID. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object restoreFrom: description: RestoreFrom specifies the details of the backup to restore when creating a new DBInstance. @@ -1770,6 +1970,13 @@ spec: Default: io1, if the Iops parameter is specified. Otherwise, gp2. type: string + tagIgnorePrefixes: + description: |- + TagIgnorePrefixes defines a list of tag key prefixes that should be ignored + during tag comparison and updates by the reconciler. + items: + type: string + type: array tags: description: Tags to assign to the DB instance. 
items: @@ -2157,6 +2364,9 @@ spec: For more information about CoIPs, see Customer-owned IP addresses (https://docs.aws.amazon.com/outposts/latest/userguide/routing.html#ip-addressing) in the Amazon Web Services Outposts User Guide. type: boolean + databaseRole: + description: The database role may be Standalone, Primary or Replica. + type: string dbClusterIdentifier: description: |- If the DB instance is a member of a DB cluster, indicates the name of the diff --git a/pkg/clients/rds/common.go b/pkg/clients/rds/common.go index c45dc65313..2934a665ab 100644 --- a/pkg/clients/rds/common.go +++ b/pkg/clients/rds/common.go @@ -92,7 +92,7 @@ func GetSecretValue(ctx context.Context, kube client.Client, ref *xpv1.SecretKey // GetDesiredPassword calculates the desired password from cache/masterPasswordSecretRef func GetDesiredPassword(ctx context.Context, kube client.Client, cr svcapitypes.RDSClusterOrInstance) (desiredPassword string, err error) { - cachedPassword, err := getCachedPassword(ctx, kube, cr) + cachedPassword, err := GetCachedPassword(ctx, kube, cr) if err != nil { return "", errors.Wrap(err, errGetCachedPassword) } @@ -120,7 +120,7 @@ func PasswordUpToDate(ctx context.Context, kube client.Client, cr svcapitypes.RD if err != nil { return false, errors.Wrap(err, errGetCachedRestoreInfo) } - cachedPassword, err := getCachedPassword(ctx, kube, cr) + cachedPassword, err := GetCachedPassword(ctx, kube, cr) if err != nil { return false, errors.Wrap(err, errGetCachedPassword) } @@ -157,7 +157,7 @@ func getCachedRestoreInfo(ctx context.Context, kube client.Client, mg resource.M return state, err } -func getCachedPassword(ctx context.Context, kube client.Client, mg resource.Managed) (pw string, err error) { +func GetCachedPassword(ctx context.Context, kube client.Client, mg resource.Managed) (pw string, err error) { secretKeyRef := &xpv1.SecretKeySelector{ SecretReference: getCachingSecretRef(mg), Key: PasswordCacheKey, diff --git a/pkg/clients/rds/common_test.go 
b/pkg/clients/rds/common_test.go index df4e6d3be1..e884092a78 100644 --- a/pkg/clients/rds/common_test.go +++ b/pkg/clients/rds/common_test.go @@ -636,7 +636,7 @@ func Test_getCachedPassword(t *testing.T) { for name, tc := range cases { t.Run(name, func(t *testing.T) { - got, err := getCachedPassword(context.Background(), tc.args.kube, tc.args.cr) + got, err := GetCachedPassword(context.Background(), tc.args.kube, tc.args.cr) if diff := cmp.Diff(tc.want.value, got); diff != "" { t.Errorf("\n%s\ngetCachedPassword(...): -want, +got:\n", diff) diff --git a/pkg/clients/rds/dbinstance.go b/pkg/clients/rds/dbinstance.go index bb09d244b4..570647ee26 100644 --- a/pkg/clients/rds/dbinstance.go +++ b/pkg/clients/rds/dbinstance.go @@ -200,3 +200,154 @@ func GenerateRestoreDBInstanceToPointInTimeInput(name string, p *v1alpha1.DBInst } return c } + +// GenerateCreateDBInstanceReadReplicaInput returns a create input. +func GenerateCreateDBInstanceReadReplicaInput(cr *v1alpha1.DBInstance) *svcsdk.CreateDBInstanceReadReplicaInput { //nolint:gocyclo + res := &svcsdk.CreateDBInstanceReadReplicaInput{} + + if cr.Spec.ForProvider.AllocatedStorage != nil { + res.SetAllocatedStorage(*cr.Spec.ForProvider.AllocatedStorage) + } + if cr.Spec.ForProvider.AutoMinorVersionUpgrade != nil { + res.SetAutoMinorVersionUpgrade(*cr.Spec.ForProvider.AutoMinorVersionUpgrade) + } + if cr.Spec.ForProvider.AvailabilityZone != nil { + res.SetAvailabilityZone(*cr.Spec.ForProvider.AvailabilityZone) + } + if cr.Spec.ForProvider.CopyTagsToSnapshot != nil { + res.SetCopyTagsToSnapshot(*cr.Spec.ForProvider.CopyTagsToSnapshot) + } + if cr.Spec.ForProvider.CustomIAMInstanceProfile != nil { + res.SetCustomIamInstanceProfile(*cr.Spec.ForProvider.CustomIAMInstanceProfile) + } + if cr.Spec.ForProvider.DBInstanceClass != nil { + res.SetDBInstanceClass(*cr.Spec.ForProvider.DBInstanceClass) + } + if cr.Spec.ForProvider.DBParameterGroupName != nil { + res.SetDBParameterGroupName(*cr.Spec.ForProvider.DBParameterGroupName) 
+ } + if cr.Spec.ForProvider.DBSubnetGroupName != nil { + res.SetDBSubnetGroupName(*cr.Spec.ForProvider.DBSubnetGroupName) + } + if cr.Spec.ForProvider.DedicatedLogVolume != nil { + res.SetDedicatedLogVolume(*cr.Spec.ForProvider.DedicatedLogVolume) + } + if cr.Spec.ForProvider.DeletionProtection != nil { + res.SetDeletionProtection(*cr.Spec.ForProvider.DeletionProtection) + } + if cr.Spec.ForProvider.Domain != nil { + res.SetDomain(*cr.Spec.ForProvider.Domain) + } + if cr.Spec.ForProvider.DomainAuthSecretARN != nil { + res.SetDomainAuthSecretArn(*cr.Spec.ForProvider.DomainAuthSecretARN) + } + if cr.Spec.ForProvider.DomainDNSIPs != nil { + res.SetDomainDnsIps(cr.Spec.ForProvider.DomainDNSIPs) + } + if cr.Spec.ForProvider.DomainFqdn != nil { + res.SetDomainFqdn(*cr.Spec.ForProvider.DomainFqdn) + } + if cr.Spec.ForProvider.DomainIAMRoleName != nil { + res.SetDomainIAMRoleName(*cr.Spec.ForProvider.DomainIAMRoleName) + } + if cr.Spec.ForProvider.DomainOu != nil { + res.SetDomainOu(*cr.Spec.ForProvider.DomainOu) + } + if cr.Spec.ForProvider.EnableCloudwatchLogsExports != nil { + res.SetEnableCloudwatchLogsExports(cr.Spec.ForProvider.EnableCloudwatchLogsExports) + } + if cr.Spec.ForProvider.EnableCustomerOwnedIP != nil { + res.SetEnableCustomerOwnedIp(*cr.Spec.ForProvider.EnableCustomerOwnedIP) + } + if cr.Spec.ForProvider.EnableIAMDatabaseAuthentication != nil { + res.SetEnableIAMDatabaseAuthentication(*cr.Spec.ForProvider.EnableIAMDatabaseAuthentication) + } + if cr.Spec.ForProvider.EnablePerformanceInsights != nil { + res.SetEnablePerformanceInsights(*cr.Spec.ForProvider.EnablePerformanceInsights) + } + if cr.Spec.ForProvider.IOPS != nil { + res.SetIops(*cr.Spec.ForProvider.IOPS) + } + if cr.Spec.ForProvider.KMSKeyID != nil { + res.SetKmsKeyId(*cr.Spec.ForProvider.KMSKeyID) + + } + if cr.Spec.ForProvider.MaxAllocatedStorage != nil { + res.SetMaxAllocatedStorage(*cr.Spec.ForProvider.MaxAllocatedStorage) + } + if cr.Spec.ForProvider.MonitoringInterval != nil { + 
res.SetMonitoringInterval(*cr.Spec.ForProvider.MonitoringInterval) + } + if cr.Spec.ForProvider.MonitoringRoleARN != nil { + res.SetMonitoringRoleArn(*cr.Spec.ForProvider.MonitoringRoleARN) + } + if cr.Spec.ForProvider.MultiAZ != nil { + res.SetMultiAZ(*cr.Spec.ForProvider.MultiAZ) + } + if cr.Spec.ForProvider.NetworkType != nil { + res.SetNetworkType(*cr.Spec.ForProvider.NetworkType) + } + if cr.Spec.ForProvider.OptionGroupName != nil { + res.SetOptionGroupName(*cr.Spec.ForProvider.OptionGroupName) + } + if cr.Spec.ForProvider.PerformanceInsightsKMSKeyID != nil { + res.SetPerformanceInsightsKMSKeyId(*cr.Spec.ForProvider.PerformanceInsightsKMSKeyID) + } + if cr.Spec.ForProvider.PerformanceInsightsRetentionPeriod != nil { + res.SetPerformanceInsightsRetentionPeriod(*cr.Spec.ForProvider.PerformanceInsightsRetentionPeriod) + } + if cr.Spec.ForProvider.Port != nil { + res.SetPort(*cr.Spec.ForProvider.Port) + } + if cr.Spec.ForProvider.ProcessorFeatures != nil { + var processorFeatures []*svcsdk.ProcessorFeature + for _, pf := range cr.Spec.ForProvider.ProcessorFeatures { + pfeature := &svcsdk.ProcessorFeature{} + if pf.Name != nil { + pfeature.SetName(*pf.Name) + } + if pf.Value != nil { + pfeature.SetValue(*pf.Value) + } + processorFeatures = append(processorFeatures, pfeature) + } + res.SetProcessorFeatures(processorFeatures) + } + if cr.Spec.ForProvider.PubliclyAccessible != nil { + res.SetPubliclyAccessible(*cr.Spec.ForProvider.PubliclyAccessible) + } + if cr.Spec.ForProvider.ReplicateSourceDBClusterID != nil { + res.SetSourceDBClusterIdentifier(*cr.Spec.ForProvider.ReplicateSourceDBClusterID) + } + if cr.Spec.ForProvider.ReplicateSourceDBInstanceID != nil { + res.SetSourceDBInstanceIdentifier(*cr.Spec.ForProvider.ReplicateSourceDBInstanceID) + } + if cr.Spec.ForProvider.StorageThroughput != nil { + res.SetStorageThroughput(*cr.Spec.ForProvider.StorageThroughput) + } + if cr.Spec.ForProvider.StorageType != nil { + 
res.SetStorageType(*cr.Spec.ForProvider.StorageType)
+	}
+	if cr.Spec.ForProvider.Tags != nil {
+		var tags []*svcsdk.Tag
+		for _, t := range cr.Spec.ForProvider.Tags {
+			tag := &svcsdk.Tag{}
+			if t.Key != nil {
+				tag.SetKey(*t.Key)
+			}
+			if t.Value != nil {
+				tag.SetValue(*t.Value)
+			}
+			tags = append(tags, tag)
+		}
+		res.SetTags(tags)
+	}
+	if cr.Spec.ForProvider.VPCSecurityGroupIDs != nil {
+		var vpcSecurityGroupIDs []*string
+		// Take the address of the slice element, not of the loop variable:
+		// &v would alias a single variable on pre-Go-1.22 toolchains, making
+		// every entry point at the last security-group ID.
+		for i := range cr.Spec.ForProvider.VPCSecurityGroupIDs {
+			vpcSecurityGroupIDs = append(vpcSecurityGroupIDs, &cr.Spec.ForProvider.VPCSecurityGroupIDs[i])
+		}
+		res.SetVpcSecurityGroupIds(vpcSecurityGroupIDs)
+	}
+	return res
+}
diff --git a/pkg/clients/rds/fake/fake.go b/pkg/clients/rds/fake/fake.go
new file mode 100644
index 0000000000..cee798d7e7
--- /dev/null
+++ b/pkg/clients/rds/fake/fake.go
@@ -0,0 +1,56 @@
+package fake
+
+import (
+	"context"
+
+	"github.com/aws/aws-sdk-go/aws/request"
+	svcsdk "github.com/aws/aws-sdk-go/service/rds"
+	"github.com/aws/aws-sdk-go/service/rds/rdsiface"
+)
+
+// MockRDSClient is a mock implementation of selected RDSAPI interface methods.
+type MockRDSClient struct {
+	rdsiface.RDSAPI
+	MockCreateDBInstanceReadReplicaWithContext func(
+		ctx context.Context,
+		input *svcsdk.CreateDBInstanceReadReplicaInput,
+		optFns ...request.Option,
+	) (*svcsdk.CreateDBInstanceReadReplicaOutput, error)
+	MockCreateDBInstanceWithContext func(
+		ctx context.Context,
+		input *svcsdk.CreateDBInstanceInput,
+		optFns ...request.Option,
+	) (*svcsdk.CreateDBInstanceOutput, error)
+	MockDescribeDBInstancesWithContext func(
+		ctx context.Context,
+		input *svcsdk.DescribeDBInstancesInput,
+		optFns ...request.Option,
+	) (*svcsdk.DescribeDBInstancesOutput, error)
+}
+
+// CreateDBInstanceReadReplicaWithContext mocks CreateDBInstanceReadReplicaWithContext method for aws-sdk client
+func (m *MockRDSClient) CreateDBInstanceReadReplicaWithContext(
+	ctx context.Context,
+	input *svcsdk.CreateDBInstanceReadReplicaInput,
+	optFns ...request.Option,
+)
(*svcsdk.CreateDBInstanceReadReplicaOutput, error) {
+	return m.MockCreateDBInstanceReadReplicaWithContext(ctx, input, optFns...)
+}
+
+// CreateDBInstanceWithContext mocks CreateDBInstanceWithContext method for aws-sdk client
+func (m *MockRDSClient) CreateDBInstanceWithContext(
+	ctx context.Context,
+	input *svcsdk.CreateDBInstanceInput,
+	optFns ...request.Option,
+) (*svcsdk.CreateDBInstanceOutput, error) {
+	return m.MockCreateDBInstanceWithContext(ctx, input, optFns...)
+}
+
+// DescribeDBInstancesWithContext mocks DescribeDBInstancesWithContext method for aws-sdk client
+func (m *MockRDSClient) DescribeDBInstancesWithContext(
+	ctx context.Context,
+	input *svcsdk.DescribeDBInstancesInput,
+	optFns ...request.Option,
+) (*svcsdk.DescribeDBInstancesOutput, error) {
+	return m.MockDescribeDBInstancesWithContext(ctx, input, optFns...)
+}
diff --git a/pkg/controller/rds/dbinstance/setup.go b/pkg/controller/rds/dbinstance/setup.go
index 027ebf29ad..785f96f654 100644
--- a/pkg/controller/rds/dbinstance/setup.go
+++ b/pkg/controller/rds/dbinstance/setup.go
@@ -11,6 +11,7 @@ import (
 	"strings"
 	"time"
 
+	"github.com/aws/aws-sdk-go/aws"
 	svcsdk "github.com/aws/aws-sdk-go/service/rds"
 	svcsdkapi "github.com/aws/aws-sdk-go/service/rds/rdsiface"
 	xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
@@ -32,6 +33,7 @@ import (
 	dbinstance "github.com/crossplane-contrib/provider-aws/pkg/clients/rds"
 	"github.com/crossplane-contrib/provider-aws/pkg/controller/rds/utils"
 	"github.com/crossplane-contrib/provider-aws/pkg/features"
+	connectaws "github.com/crossplane-contrib/provider-aws/pkg/utils/connect/aws"
 	errorutils "github.com/crossplane-contrib/provider-aws/pkg/utils/errors"
 	"github.com/crossplane-contrib/provider-aws/pkg/utils/jsonpatch"
 	"github.com/crossplane-contrib/provider-aws/pkg/utils/pointer"
@@ -40,6 +42,7 @@ import (
 
 // error constants
 const (
+	errCreateReadReplica = "cannot create DB instance read replica"
 	errS3RestoreFailed = "cannot restore DB instance
from S3 backup" errSnapshotRestoreFailed = "cannot restore DB instance from snapshot" errPointInTimeRestoreFailed = "cannot restore DB instance from point in time" @@ -54,6 +57,13 @@ const ( backupWindowFormat = "15:04" ) +// database roles +const ( + databaseRoleStandalone = "Instance" + databaseRolePrimary = "Primary" + databaseRoleReadReplica = "Replica" +) + // other const ( statusDeleting = "deleting" @@ -62,21 +72,6 @@ const ( // SetupDBInstance adds a controller that reconciles DBInstance func SetupDBInstance(mgr ctrl.Manager, o controller.Options) error { name := managed.ControllerName(svcapitypes.DBInstanceGroupKind) - opts := []option{ - func(e *external) { - c := &custom{client: e.client, kube: e.kube, external: e} - e.lateInitialize = lateInitialize - e.isUpToDate = c.isUpToDate - e.preObserve = preObserve - e.postObserve = c.postObserve - e.preCreate = c.preCreate - e.preDelete = c.preDelete - e.postDelete = c.postDelete - e.filterList = filterList - e.preUpdate = c.preUpdate - e.postUpdate = c.postUpdate - }, - } cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} if o.Features.Enabled(features.EnableAlphaExternalSecretStores) { @@ -85,7 +80,7 @@ func SetupDBInstance(mgr ctrl.Manager, o controller.Options) error { reconcilerOpts := []managed.ReconcilerOption{ managed.WithCriticalAnnotationUpdater(custommanaged.NewRetryingCriticalAnnotationUpdater(mgr.GetClient())), - managed.WithTypedExternalConnector(&connector{kube: mgr.GetClient(), opts: opts}), + managed.WithTypedExternalConnector(&customConnector{kube: mgr.GetClient()}), managed.WithPollInterval(o.PollInterval), managed.WithLogger(o.Logger.WithValues("controller", name)), managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), @@ -108,23 +103,90 @@ func SetupDBInstance(mgr ctrl.Manager, o controller.Options) error { Complete(r) } -type custom struct { - kube client.Client - client svcsdkapi.RDSAPI - external *external +type 
customConnector struct { + kube client.Client +} + +type customExternal struct { + external + shared *shared +} - cache struct { - addTags []*svcsdk.Tag - removeTags []*string +type shared struct { + external + cache *cache +} + +type cache struct { + addTags []*svcsdk.Tag + removeTags []*string + desiredPassword string +} + +func newCustomExternal(kube client.Client, client svcsdkapi.RDSAPI) *customExternal { + s := &shared{cache: &cache{}} + e := external{ + kube: kube, + client: client, + preObserve: preObserve, + isUpToDate: s.isUpToDate, + postObserve: s.postObserve, + preUpdate: s.preUpdate, + postUpdate: s.postUpdate, + preCreate: s.preCreate, + postCreate: nopPostCreate, + preDelete: s.preDelete, + postDelete: s.postDelete, + filterList: filterList, + lateInitialize: lateInitialize, + } + s.external = e + return &customExternal{ + external: e, + shared: s, } } +func (c *customConnector) Connect(ctx context.Context, cr *svcapitypes.DBInstance) (managed.TypedExternalClient[*svcapitypes.DBInstance], error) { + sess, err := connectaws.GetConfigV1(ctx, c.kube, cr, cr.Spec.ForProvider.Region) + if err != nil { + return nil, errors.Wrap(err, errCreateSession) + } + return newCustomExternal(c.kube, svcsdk.New(sess)), nil +} + +func (c *customExternal) Create(ctx context.Context, cr *svcapitypes.DBInstance) (managed.ExternalCreation, error) { + if cr.Spec.ForProvider.ReplicateSourceDBInstanceID != nil || cr.Spec.ForProvider.ReplicateSourceDBInstanceIDRef != nil || + cr.Spec.ForProvider.ReplicateSourceDBInstanceIDSelector != nil || cr.Spec.ForProvider.ReplicateSourceDBClusterID != nil || + cr.Spec.ForProvider.ReplicateSourceDBClusterIDRef != nil || cr.Spec.ForProvider.ReplicateSourceDBClusterIDSelector != nil { + cr.Status.SetConditions(xpv1.Creating()) + cr.Status.AtProvider.DatabaseRole = aws.String(databaseRoleReadReplica) + + createDBInstanceReadReplicaInput := dbinstance.GenerateCreateDBInstanceReadReplicaInput(cr) + 
createDBInstanceReadReplicaInput.DBInstanceIdentifier = pointer.ToOrNilIfZeroValue(meta.GetExternalName(cr)) + + _, err := c.client.CreateDBInstanceReadReplicaWithContext(ctx, createDBInstanceReadReplicaInput) + if err != nil { + return managed.ExternalCreation{}, errors.Wrap(err, errCreateReadReplica) + } + return managed.ExternalCreation{}, nil + } + cr.Status.AtProvider.DatabaseRole = aws.String(databaseRoleStandalone) + return c.external.Create(ctx, cr) +} + func preObserve(_ context.Context, cr *svcapitypes.DBInstance, obj *svcsdk.DescribeDBInstancesInput) error { obj.DBInstanceIdentifier = pointer.ToOrNilIfZeroValue(meta.GetExternalName(cr)) return nil } -func (e *custom) preCreate(ctx context.Context, cr *svcapitypes.DBInstance, obj *svcsdk.CreateDBInstanceInput) (err error) { //nolint:gocyclo +func (s *shared) preCreate(ctx context.Context, cr *svcapitypes.DBInstance, obj *svcsdk.CreateDBInstanceInput) (err error) { //nolint:gocyclo + // If the DBInstance is going to be created as a read replica, we do not need to set the MasterUserPassword and the others + if cr.Spec.ForProvider.ReplicateSourceDBInstanceID != nil || cr.Spec.ForProvider.ReplicateSourceDBInstanceIDRef != nil || + cr.Spec.ForProvider.ReplicateSourceDBInstanceIDSelector != nil || cr.Spec.ForProvider.ReplicateSourceDBClusterID != nil || + cr.Spec.ForProvider.ReplicateSourceDBClusterIDRef != nil || cr.Spec.ForProvider.ReplicateSourceDBClusterIDSelector != nil { + return nil + } restoreFrom := cr.Spec.ForProvider.RestoreFrom autogenerate := cr.Spec.ForProvider.AutogeneratePassword masterUserPasswordSecretRef := cr.Spec.ForProvider.MasterUserPasswordSecretRef @@ -140,7 +202,7 @@ func (e *custom) preCreate(ctx context.Context, cr *svcapitypes.DBInstance, obj pw, err = password.Generate() case masterUserPasswordSecretRef != nil && autogenerate, masterUserPasswordSecretRef != nil && !autogenerate: - pw, err = dbinstance.GetSecretValue(ctx, e.kube, masterUserPasswordSecretRef) + pw, err = 
dbinstance.GetSecretValue(ctx, s.kube, masterUserPasswordSecretRef) } if err != nil { return errors.Wrap(err, dbinstance.ErrNoRetrievePasswordOrGenerate) @@ -175,18 +237,18 @@ func (e *custom) preCreate(ctx context.Context, cr *svcapitypes.DBInstance, obj switch *restoreFrom.Source { case "S3": - _, err := e.client.RestoreDBInstanceFromS3WithContext(ctx, dbinstance.GenerateRestoreDBInstanceFromS3Input(meta.GetExternalName(cr), pw, &cr.Spec.ForProvider)) + _, err := s.client.RestoreDBInstanceFromS3WithContext(ctx, dbinstance.GenerateRestoreDBInstanceFromS3Input(meta.GetExternalName(cr), pw, &cr.Spec.ForProvider)) if err != nil { return errorutils.Wrap(err, errS3RestoreFailed) } case "Snapshot": - _, err := e.client.RestoreDBInstanceFromDBSnapshotWithContext(ctx, dbinstance.GenerateRestoreDBInstanceFromSnapshotInput(meta.GetExternalName(cr), &cr.Spec.ForProvider)) + _, err := s.client.RestoreDBInstanceFromDBSnapshotWithContext(ctx, dbinstance.GenerateRestoreDBInstanceFromSnapshotInput(meta.GetExternalName(cr), &cr.Spec.ForProvider)) if err != nil { return errorutils.Wrap(err, errSnapshotRestoreFailed) } case "PointInTime": - _, err := e.client.RestoreDBInstanceToPointInTimeWithContext(ctx, dbinstance.GenerateRestoreDBInstanceToPointInTimeInput(meta.GetExternalName(cr), &cr.Spec.ForProvider)) + _, err := s.client.RestoreDBInstanceToPointInTimeWithContext(ctx, dbinstance.GenerateRestoreDBInstanceToPointInTimeInput(meta.GetExternalName(cr), &cr.Spec.ForProvider)) if err != nil { return errorutils.Wrap(err, errPointInTimeRestoreFailed) } @@ -199,7 +261,7 @@ func (e *custom) preCreate(ctx context.Context, cr *svcapitypes.DBInstance, obj obj.EngineVersion = cr.Spec.ForProvider.EngineVersion } - if _, err = dbinstance.Cache(ctx, e.kube, cr, passwordRestoreInfo); err != nil { + if _, err = dbinstance.Cache(ctx, s.kube, cr, passwordRestoreInfo); err != nil { return errors.Wrap(err, dbinstance.ErrCachePassword) } @@ -214,18 +276,20 @@ func (e *custom) preCreate(ctx 
context.Context, cr *svcapitypes.DBInstance, obj return nil } -func (e *custom) updateConnectionDetails(ctx context.Context, cr *svcapitypes.DBInstance, details managed.ConnectionDetails) (managed.ConnectionDetails, error) { +func (s *shared) updateConnectionDetails(ctx context.Context, cr *svcapitypes.DBInstance, details managed.ConnectionDetails) (managed.ConnectionDetails, error) { if details == nil { details = managed.ConnectionDetails{} } details[xpv1.ResourceCredentialsSecretUserKey] = []byte(pointer.StringValue(cr.Spec.ForProvider.MasterUsername)) - - pw, err := dbinstance.GetDesiredPassword(ctx, e.kube, cr) - if err != nil { - return details, errors.Wrap(err, dbinstance.ErrGetCachedPassword) + if s.cache.desiredPassword == "" { + pw, err := dbinstance.GetDesiredPassword(ctx, s.kube, cr) + if err != nil && pointer.StringValue(cr.Status.AtProvider.DatabaseRole) != databaseRoleReadReplica { + return details, errors.Wrap(err, dbinstance.ErrGetCachedPassword) + } + s.cache.desiredPassword = pw } - details[xpv1.ResourceCredentialsSecretPasswordKey] = []byte(pw) + details[xpv1.ResourceCredentialsSecretPasswordKey] = []byte(s.cache.desiredPassword) if cr.Status.AtProvider.Endpoint == nil { return details, nil @@ -240,15 +304,10 @@ func (e *custom) updateConnectionDetails(ctx context.Context, cr *svcapitypes.DB return details, nil } -func (e *custom) preUpdate(ctx context.Context, cr *svcapitypes.DBInstance, obj *svcsdk.ModifyDBInstanceInput) (err error) { +func (s *shared) preUpdate(ctx context.Context, cr *svcapitypes.DBInstance, obj *svcsdk.ModifyDBInstanceInput) (err error) { obj.DBInstanceIdentifier = pointer.ToOrNilIfZeroValue(meta.GetExternalName(cr)) obj.ApplyImmediately = cr.Spec.ForProvider.ApplyImmediately - - desiredPassword, err := dbinstance.GetDesiredPassword(ctx, e.kube, cr) - if err != nil { - return errors.Wrap(err, dbinstance.ErrRetrievePasswordForUpdate) - } - obj.MasterUserPassword = pointer.ToOrNilIfZeroValue(desiredPassword) + 
obj.MasterUserPassword = pointer.ToOrNilIfZeroValue(s.cache.desiredPassword) // VpcSecurityGroupIds cannot be set on an instance that belongs to a DBCluster if cr.Status.AtProvider.DBClusterIdentifier == nil { @@ -272,7 +331,7 @@ func (e *custom) preUpdate(ctx context.Context, cr *svcapitypes.DBInstance, obj input := GenerateDescribeDBInstancesInput(cr) - out, err := e.client.DescribeDBInstancesWithContext(ctx, input) + out, err := s.client.DescribeDBInstancesWithContext(ctx, input) if err != nil { return errors.Wrap(err, dbinstance.ErrDescribe) } @@ -283,40 +342,37 @@ func (e *custom) preUpdate(ctx context.Context, cr *svcapitypes.DBInstance, obj return nil } -func (e *custom) postUpdate(ctx context.Context, cr *svcapitypes.DBInstance, out *svcsdk.ModifyDBInstanceOutput, upd managed.ExternalUpdate, err error) (managed.ExternalUpdate, error) { +func (s *shared) postUpdate(ctx context.Context, cr *svcapitypes.DBInstance, out *svcsdk.ModifyDBInstanceOutput, upd managed.ExternalUpdate, err error) (managed.ExternalUpdate, error) { if err != nil { return upd, err } - desiredPassword, err := dbinstance.GetDesiredPassword(ctx, e.kube, cr) - if err != nil { - return upd, errors.Wrap(err, dbinstance.ErrRetrievePasswordForUpdate) - } + upd.ConnectionDetails, err = s.updateConnectionDetails(ctx, cr, upd.ConnectionDetails) - _, err = dbinstance.Cache(ctx, e.kube, cr, map[string]string{ - dbinstance.PasswordCacheKey: desiredPassword, - dbinstance.RestoreFlagCacheKay: "", // reset restore flag - }) - if err != nil { - return upd, errors.Wrap(err, dbinstance.ErrCachePassword) + if s.cache.desiredPassword != "" { + _, err = dbinstance.Cache(ctx, s.kube, cr, map[string]string{ + dbinstance.PasswordCacheKey: s.cache.desiredPassword, + dbinstance.RestoreFlagCacheKay: "", // reset restore flag + }) + if err != nil { + return upd, errors.Wrap(err, dbinstance.ErrCachePassword) + } } - upd.ConnectionDetails, err = e.updateConnectionDetails(ctx, cr, upd.ConnectionDetails) - // Update tags 
if necessary - if len(e.cache.addTags) > 0 { - _, err := e.client.AddTagsToResourceWithContext(ctx, &svcsdk.AddTagsToResourceInput{ + if len(s.cache.addTags) > 0 { + _, err := s.client.AddTagsToResourceWithContext(ctx, &svcsdk.AddTagsToResourceInput{ ResourceName: out.DBInstance.DBInstanceArn, - Tags: e.cache.addTags, + Tags: s.cache.addTags, }) if err != nil { return upd, errors.Wrap(err, errAddTags) } } - if len(e.cache.removeTags) > 0 { - _, err := e.client.RemoveTagsFromResourceWithContext(ctx, &svcsdk.RemoveTagsFromResourceInput{ + if len(s.cache.removeTags) > 0 { + _, err := s.client.RemoveTagsFromResourceWithContext(ctx, &svcsdk.RemoveTagsFromResourceInput{ ResourceName: out.DBInstance.DBInstanceArn, - TagKeys: e.cache.removeTags, + TagKeys: s.cache.removeTags, }) if err != nil { return upd, errors.Wrap(err, errRemoveTags) @@ -326,28 +382,28 @@ func (e *custom) postUpdate(ctx context.Context, cr *svcapitypes.DBInstance, out return upd, err } -func (e *custom) preDelete(ctx context.Context, cr *svcapitypes.DBInstance, obj *svcsdk.DeleteDBInstanceInput) (bool, error) { +func (s *shared) preDelete(ctx context.Context, cr *svcapitypes.DBInstance, obj *svcsdk.DeleteDBInstanceInput) (bool, error) { obj.DBInstanceIdentifier = pointer.ToOrNilIfZeroValue(meta.GetExternalName(cr)) obj.FinalDBSnapshotIdentifier = pointer.ToOrNilIfZeroValue(cr.Spec.ForProvider.FinalDBSnapshotIdentifier) obj.SkipFinalSnapshot = pointer.ToOrNilIfZeroValue(cr.Spec.ForProvider.SkipFinalSnapshot) obj.DeleteAutomatedBackups = cr.Spec.ForProvider.DeleteAutomatedBackups - _, _ = e.external.Update(ctx, cr) + _, _ = s.external.Update(ctx, cr) if *cr.Status.AtProvider.DBInstanceStatus == statusDeleting { return true, nil } return false, nil } -func (e *custom) postDelete(ctx context.Context, cr *svcapitypes.DBInstance, obj *svcsdk.DeleteDBInstanceOutput, err error) (managed.ExternalDelete, error) { +func (s *shared) postDelete(ctx context.Context, cr *svcapitypes.DBInstance, obj 
*svcsdk.DeleteDBInstanceOutput, err error) (managed.ExternalDelete, error) { if err != nil { return managed.ExternalDelete{}, err } - return managed.ExternalDelete{}, dbinstance.DeleteCache(ctx, e.kube, cr) + return managed.ExternalDelete{}, dbinstance.DeleteCache(ctx, s.kube, cr) } -func (e *custom) postObserve(ctx context.Context, cr *svcapitypes.DBInstance, resp *svcsdk.DescribeDBInstancesOutput, obs managed.ExternalObservation, err error) (managed.ExternalObservation, error) { +func (s *shared) postObserve(ctx context.Context, cr *svcapitypes.DBInstance, resp *svcsdk.DescribeDBInstancesOutput, obs managed.ExternalObservation, err error) (managed.ExternalObservation, error) { if err != nil { return obs, err } @@ -369,7 +425,7 @@ func (e *custom) postObserve(ctx context.Context, cr *svcapitypes.DBInstance, re cr.SetConditions(xpv1.Unavailable().WithMessage("DB Instance is " + pointer.StringValue(resp.DBInstances[0].DBInstanceStatus))) } - obs.ConnectionDetails, err = e.updateConnectionDetails(ctx, cr, obs.ConnectionDetails) + obs.ConnectionDetails, err = s.updateConnectionDetails(ctx, cr, obs.ConnectionDetails) return obs, err } @@ -463,7 +519,7 @@ func lateInitialize(in *svcapitypes.DBInstanceParameters, out *svcsdk.DescribeDB return nil } -func (e *custom) isUpToDate(ctx context.Context, cr *svcapitypes.DBInstance, out *svcsdk.DescribeDBInstancesOutput) (upToDate bool, diff string, err error) { //nolint:gocyclo +func (s *shared) isUpToDate(ctx context.Context, cr *svcapitypes.DBInstance, out *svcsdk.DescribeDBInstancesOutput) (upToDate bool, diff string, err error) { //nolint:gocyclo db := out.DBInstances[0] patch, err := createPatch(out, &cr.Spec.ForProvider) @@ -480,13 +536,36 @@ func (e *custom) isUpToDate(ctx context.Context, cr *svcapitypes.DBInstance, out if status == "modifying" || status == "upgrading" || status == "rebooting" || status == "creating" || status == "deleting" { return true, "", nil } + switch { + case db.ReadReplicaDBClusterIdentifiers != 
nil || db.ReadReplicaDBInstanceIdentifiers != nil: + cr.Status.AtProvider.DatabaseRole = aws.String(databaseRolePrimary) + case db.ReadReplicaSourceDBClusterIdentifier != nil || db.ReadReplicaSourceDBInstanceIdentifier != nil: + cr.Status.AtProvider.DatabaseRole = aws.String(databaseRoleReadReplica) + default: + cr.Status.AtProvider.DatabaseRole = aws.String(databaseRoleStandalone) + } - passwordUpToDate, err := dbinstance.PasswordUpToDate(ctx, e.kube, cr) + autogenerate := cr.Spec.ForProvider.AutogeneratePassword + masterUserPasswordSecretRef := cr.Spec.ForProvider.MasterUserPasswordSecretRef + cachedMasterPasswordExist := true + _, err = dbinstance.GetCachedPassword(ctx, s.kube, cr) if err != nil { - return false, "", errors.Wrap(err, dbinstance.ErrNoPasswordUpToDate) + cachedMasterPasswordExist = false } - if !passwordUpToDate { - return false, "", nil + + // If the instance is a read replica and the password was not created before, and it is not assumed to be + // generated/created(by autogenerate or masterUserPasswordSecretRef), we don't check the password. By + // default, a read replica has the same credentials as the primary instance. + if !(pointer.StringValue(cr.Status.AtProvider.DatabaseRole) == databaseRoleReadReplica && + !autogenerate && masterUserPasswordSecretRef == nil && !cachedMasterPasswordExist) { + + passwordUpToDate, err := dbinstance.PasswordUpToDate(ctx, s.kube, cr) + if err != nil { + return false, "", errors.Wrap(err, dbinstance.ErrNoPasswordUpToDate) + } + if !passwordUpToDate { + return false, "", nil + } } // (PocketMobsters): AWS reformats our preferred time windows for backups and maintenance, @@ -500,7 +579,7 @@ func (e *custom) isUpToDate(ctx context.Context, cr *svcapitypes.DBInstance, out return false, "", err } - // Depending on whether the instance was created as gp2 or modified from another type (e.g. gp3) to gp2, + // Depending on whether the instance was created as gp2 or modified from another type (e.g. 
gp3) to gp2, // AWS provides different responses for IOPS/StorageThroughput (either 0 or nil). // Therefore, we consider both 0 and nil to be equivalent. iopsChanged := !(pointer.Int64Value(cr.Spec.ForProvider.IOPS) == pointer.Int64Value(db.Iops)) @@ -532,10 +611,25 @@ func (e *custom) isUpToDate(ctx context.Context, cr *svcapitypes.DBInstance, out cmpopts.IgnoreFields(svcapitypes.CustomDBInstanceParameters{}, "RestoreFrom"), cmpopts.IgnoreFields(svcapitypes.CustomDBInstanceParameters{}, "VPCSecurityGroupIDs"), cmpopts.IgnoreFields(svcapitypes.CustomDBInstanceParameters{}, "DeleteAutomatedBackups"), + cmpopts.IgnoreFields(svcapitypes.CustomDBInstanceParameters{}, "ReplicateSourceDBInstanceID", "ReplicateSourceDBClusterID"), + cmpopts.IgnoreFields(svcapitypes.CustomDBInstanceParameters{}, "TagIgnorePrefixes"), ) - e.cache.addTags, e.cache.removeTags = utils.DiffTags(cr.Spec.ForProvider.Tags, db.TagList) - tagsChanged := len(e.cache.addTags) != 0 || len(e.cache.removeTags) != 0 + ignore := append([]string{"aws:"}, cr.Spec.ForProvider.TagIgnorePrefixes...) 
+ var observedTags []*svcsdk.Tag + if db.TagList != nil { + for _, tag := range db.TagList { // index discarded with _ + if utils.ShouldIgnore(pointer.StringValue(tag.Key), ignore) { + continue + } + observedTags = append(observedTags, &svcsdk.Tag{ + Key: tag.Key, + Value: tag.Value, + }) + } + } + s.cache.addTags, s.cache.removeTags = utils.DiffTags(cr.Spec.ForProvider.Tags, observedTags) + tagsChanged := len(s.cache.addTags) != 0 || len(s.cache.removeTags) != 0 if diff == "" && !maintenanceWindowChanged && !backupWindowChanged && !iopsChanged && !storageThroughputChanged && !versionChanged && !vpcSGsChanged && !dbParameterGroupChanged && !optionGroupChanged && !tagsChanged { return true, diff, nil @@ -576,7 +670,7 @@ func (e *custom) isUpToDate(ctx context.Context, cr *svcapitypes.DBInstance, out diff += fmt.Sprintf("\ndesired optionGroupName: %s \nobserved optionGroupName: %s ", pointer.StringValue(cr.Spec.ForProvider.OptionGroupName), pointer.StringValue(db.OptionGroupMemberships[0].OptionGroupName)) } if tagsChanged { - diff += fmt.Sprintf("\nadd %d tag(s) and remove %d tag(s)", len(e.cache.addTags), len(e.cache.removeTags)) + diff += fmt.Sprintf("\nadd %d tag(s) and remove %d tag(s)", len(s.cache.addTags), len(s.cache.removeTags)) } log.Println(diff) diff --git a/pkg/controller/rds/dbinstance/setup_test.go b/pkg/controller/rds/dbinstance/setup_test.go new file mode 100644 index 0000000000..eef30e5cc2 --- /dev/null +++ b/pkg/controller/rds/dbinstance/setup_test.go @@ -0,0 +1,268 @@ +package dbinstance + +import ( + "context" + "errors" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + svcsdk "github.com/aws/aws-sdk-go/service/rds" + "github.com/crossplane/crossplane-runtime/pkg/test" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "sigs.k8s.io/controller-runtime/pkg/client" + + svcapitypes "github.com/crossplane-contrib/provider-aws/apis/rds/v1alpha1" + 
"github.com/crossplane-contrib/provider-aws/pkg/clients/rds/fake" +) + +func TestCreate(t *testing.T) { + type args struct { + cr *svcapitypes.DBInstance + kube client.Client + awsRDSClient fake.MockRDSClient + } + + type want struct { + statusAtProvider *svcapitypes.CustomDBInstanceObservation + err error + } + + cases := map[string]struct { + args + want + }{ + "CreateReadReplica": { + args: args{ + cr: &svcapitypes.DBInstance{ + Spec: svcapitypes.DBInstanceSpec{ + ForProvider: svcapitypes.DBInstanceParameters{ + CustomDBInstanceParameters: svcapitypes.CustomDBInstanceParameters{ + ReplicateSourceDBInstanceID: aws.String("source-db-instance-id"), + }, + }, + }, + }, + kube: test.NewMockClient(), + awsRDSClient: fake.MockRDSClient{ + MockCreateDBInstanceReadReplicaWithContext: func(ctx context.Context, input *svcsdk.CreateDBInstanceReadReplicaInput, optFns ...request.Option) (*svcsdk.CreateDBInstanceReadReplicaOutput, error) { + return &svcsdk.CreateDBInstanceReadReplicaOutput{}, nil + }, + MockCreateDBInstanceWithContext: func(ctx context.Context, input *svcsdk.CreateDBInstanceInput, optFns ...request.Option) (*svcsdk.CreateDBInstanceOutput, error) { + return &svcsdk.CreateDBInstanceOutput{}, nil + }, + }, + }, + want: want{ + statusAtProvider: &svcapitypes.CustomDBInstanceObservation{ + DatabaseRole: aws.String(databaseRoleReadReplica), + }, + }, + }, + "CreateStandaloneInstance": { + args: args{ + cr: &svcapitypes.DBInstance{ + Spec: svcapitypes.DBInstanceSpec{ + ForProvider: svcapitypes.DBInstanceParameters{ + CustomDBInstanceParameters: svcapitypes.CustomDBInstanceParameters{ + AutogeneratePassword: true, + }, + }, + }, + Status: svcapitypes.DBInstanceStatus{ + AtProvider: svcapitypes.DBInstanceObservation{}, + }, + }, + kube: test.NewMockClient(), + awsRDSClient: fake.MockRDSClient{ + MockCreateDBInstanceWithContext: func(ctx context.Context, input *svcsdk.CreateDBInstanceInput, optFns ...request.Option) (*svcsdk.CreateDBInstanceOutput, error) { + return 
&svcsdk.CreateDBInstanceOutput{DBInstance: &svcsdk.DBInstance{}}, nil + }, + MockCreateDBInstanceReadReplicaWithContext: func(ctx context.Context, input *svcsdk.CreateDBInstanceReadReplicaInput, optFns ...request.Option) (*svcsdk.CreateDBInstanceReadReplicaOutput, error) { + return &svcsdk.CreateDBInstanceReadReplicaOutput{}, nil + }, + }, + }, + want: want{ + statusAtProvider: &svcapitypes.CustomDBInstanceObservation{ + DatabaseRole: aws.String(databaseRoleStandalone), + }, + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + cr := tc.args.cr + ce := newCustomExternal(tc.kube, &tc.awsRDSClient) + _, err := ce.Create(context.TODO(), cr) + + if diff := cmp.Diff(tc.want.err, err, cmpopts.EquateErrors()); diff != "" { + t.Errorf("r: -want, +got error: \n%s", diff) + } + if diff := cmp.Diff(tc.want.statusAtProvider.DatabaseRole, cr.Status.AtProvider.DatabaseRole); diff != "" { + t.Errorf("r: -want, +got: \n%s", diff) + } + }) + } +} + +func TestIsUpToDate(t *testing.T) { + type args struct { + kube client.Client + cr *svcapitypes.DBInstance + out *svcsdk.DescribeDBInstancesOutput + } + + type want struct { + upToDate bool + err error + statusAtProvider *svcapitypes.CustomDBInstanceObservation + } + + cases := map[string]struct { + args + want + }{ + "UpToDateReadReplicaWithReplicatedMasterCredentials": { + args: args{ + cr: &svcapitypes.DBInstance{ + Spec: svcapitypes.DBInstanceSpec{ + ForProvider: svcapitypes.DBInstanceParameters{ + DeletionProtection: aws.Bool(true), + }, + }, + }, + out: &svcsdk.DescribeDBInstancesOutput{ + DBInstances: []*svcsdk.DBInstance{ + { + DeletionProtection: aws.Bool(true), + ReadReplicaSourceDBClusterIdentifier: aws.String("source-db-instance-id"), + }, + }, + }, + kube: &test.MockClient{ + MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + return errors.New("not found") + }, + }, + }, + want: want{ + upToDate: true, + err: nil, + statusAtProvider: 
&svcapitypes.CustomDBInstanceObservation{ + DatabaseRole: aws.String(databaseRoleReadReplica), + }, + }, + }, + "databaseRolePrimary": { + args: args{ + cr: &svcapitypes.DBInstance{ + Spec: svcapitypes.DBInstanceSpec{ + ForProvider: svcapitypes.DBInstanceParameters{ + DeletionProtection: aws.Bool(true), + }, + }, + }, + out: &svcsdk.DescribeDBInstancesOutput{ + DBInstances: []*svcsdk.DBInstance{ + { + DeletionProtection: aws.Bool(true), + ReadReplicaDBInstanceIdentifiers: []*string{aws.String("db-read-replica-id")}, + }, + }, + }, + kube: test.NewMockClient(), + }, + want: want{ + upToDate: true, + err: nil, + statusAtProvider: &svcapitypes.CustomDBInstanceObservation{ + DatabaseRole: aws.String(databaseRolePrimary), + }, + }, + }, + "UpToDateStandaloneInstance": { + args: args{ + cr: &svcapitypes.DBInstance{ + Spec: svcapitypes.DBInstanceSpec{ + ForProvider: svcapitypes.DBInstanceParameters{ + DeletionProtection: aws.Bool(true), + }, + }, + }, + out: &svcsdk.DescribeDBInstancesOutput{ + DBInstances: []*svcsdk.DBInstance{ + { + DeletionProtection: aws.Bool(true), + }, + }, + }, + kube: test.NewMockClient(), + }, + want: want{ + upToDate: true, + err: nil, + statusAtProvider: &svcapitypes.CustomDBInstanceObservation{ + DatabaseRole: aws.String(databaseRoleStandalone), + }, + }, + }, + "IgnoresTagswithTagIgnorePrefixes": { + args: args{ + cr: &svcapitypes.DBInstance{ + Spec: svcapitypes.DBInstanceSpec{ + ForProvider: svcapitypes.DBInstanceParameters{ + CustomDBInstanceParameters: svcapitypes.CustomDBInstanceParameters{ + TagIgnorePrefixes: []string{"aws:", "c7n:"}, + }, + Tags: []*svcapitypes.Tag{ + {Key: aws.String("env"), Value: aws.String("prod")}, + }, + DeletionProtection: aws.Bool(true), + }, + }, + }, + out: &svcsdk.DescribeDBInstancesOutput{ + DBInstances: []*svcsdk.DBInstance{ + { + DeletionProtection: aws.Bool(true), + TagList: []*svcsdk.Tag{ + {Key: aws.String("aws:createdBy"), Value: aws.String("terraform")}, + {Key: aws.String("c7n:policy"), Value: 
aws.String("auto")}, + {Key: aws.String("env"), Value: aws.String("prod")}, + }, + }, + }, + }, + kube: test.NewMockClient(), + }, + want: want{ + upToDate: true, + err: nil, + statusAtProvider: &svcapitypes.CustomDBInstanceObservation{ + DatabaseRole: aws.String(databaseRoleStandalone), + }, + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + cr := tc.args.cr + ce := newCustomExternal(tc.kube, nil) + upToDate, _, err := ce.isUpToDate(context.TODO(), cr, tc.args.out) + + if diff := cmp.Diff(tc.want.err, err); diff != "" { + t.Errorf("r: -want, +got error: \n%s", diff) + } + if diff := cmp.Diff(tc.want.upToDate, upToDate); diff != "" { + t.Errorf("r: -want, +got: \n%s", diff) + } + if diff := cmp.Diff(tc.want.statusAtProvider.DatabaseRole, cr.Status.AtProvider.DatabaseRole); diff != "" { + t.Errorf("r: -want, +got: \n%s", diff) + } + }) + } +} diff --git a/pkg/controller/rds/utils/tags.go b/pkg/controller/rds/utils/tags.go index f3da92849a..c0e7160b3a 100644 --- a/pkg/controller/rds/utils/tags.go +++ b/pkg/controller/rds/utils/tags.go @@ -19,6 +19,7 @@ package utils import ( "context" "sort" + "strings" svcsdk "github.com/aws/aws-sdk-go/service/rds" "github.com/aws/aws-sdk-go/service/rds/rdsiface" @@ -35,6 +36,16 @@ const ( errCreateTags = "cannot create tags" ) +// ShouldIgnore returns true if `key` starts with any supplied prefix. +func ShouldIgnore(key string, prefixes []string) bool { + for _, p := range prefixes { + if strings.HasPrefix(key, p) { + return true + } + } + return false +} + // AreTagsUpToDate for spec and resourceName func AreTagsUpToDate(ctx context.Context, client rdsiface.RDSAPI, spec []*svcapitypes.Tag, resourceName *string) (bool, []*svcsdk.Tag, []*string, error) { current, err := ListTagsForResource(ctx, client, resourceName)