diff --git a/api/core/v1alpha1/tiproxy_types.go b/api/core/v1alpha1/tiproxy_types.go index ad2d79c9a72..51ca01bf770 100644 --- a/api/core/v1alpha1/tiproxy_types.go +++ b/api/core/v1alpha1/tiproxy_types.go @@ -159,6 +159,15 @@ type TiProxyTemplateSpec struct { Config ConfigFile `json:"config,omitempty"` UpdateStrategy UpdateStrategy `json:"updateStrategy,omitempty"` + // ExternalPDClusters defines additional backend PD clusters that TiProxy + // should connect to besides the local cluster referenced by spec.cluster. + // Operator will manage these clusters through TiProxy's reloadable + // backend-clusters config and HTTP config API. + // +listType=map + // +listMapKey=name + // +kubebuilder:validation:MaxItems=64 + ExternalPDClusters []TiProxyExternalPDCluster `json:"externalPDClusters,omitempty"` + Security *TiProxySecurity `json:"security,omitempty"` // Volumes defines data volume of TiProxy, it is optional. @@ -182,6 +191,24 @@ type TiProxyPreStop struct { SleepSeconds int32 `json:"sleepSeconds,omitempty"` } +type TiProxyExternalPDCluster struct { + // Name identifies the backend PD cluster inside TiProxy. + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + Name string `json:"name"` + + // PDAddrs is a comma separated address list of PD servers, for example: + // "pd-0.example.com:2379,pd-1.example.com:2379". + // +kubebuilder:validation:MinLength=1 + PDAddrs string `json:"pdAddrs"` + + // NSServers defines the DNS servers TiProxy should use when resolving the + // backend cluster endpoints. + // +listType=set + // +kubebuilder:validation:MaxItems=16 + NSServers []string `json:"nsServers,omitempty"` +} + type TiProxySecurity struct { // Whether enable the TLS connection. TLS *TiProxyTLSConfig `json:"tls,omitempty"` @@ -205,13 +232,37 @@ type TiProxyServer struct { type TiProxyPorts struct { // Client defines port for TiProxy's SQL service. 
- Client *Port `json:"client,omitempty"` + Client *TiProxyPortOrRange `json:"client,omitempty"` // API defines port for TiProxy API service. API *Port `json:"api,omitempty"` // Peer defines port for TiProxy's peer service. Peer *Port `json:"peer,omitempty"` } +// TiProxyPortOrRange defines the main SQL port and optional extra listening range. +// The operator continues to use Port for Pod probes and the internal Service. +// Range is only propagated to TiProxy's proxy.port-range config. +// +kubebuilder:validation:XValidation:rule="!has(self.range) || self.range.start <= self.range.end",message="range.start must be less than or equal to range.end" +type TiProxyPortOrRange struct { + // Port defines the main SQL port exposed by the TiProxy Pod and Service. + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + Port *int32 `json:"port,omitempty"` + + // Range defines additional SQL ports listened by TiProxy itself. + Range *TiProxyPortRange `json:"range,omitempty"` +} + +type TiProxyPortRange struct { + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + Start int32 `json:"start"` + + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + End int32 `json:"end"` +} + type TiProxyProbes struct { // Readiness defines the readiness probe for TiProxy. // The default handler is a TCP socket on the client port. diff --git a/api/core/v1alpha1/zz_generated.deepcopy.go b/api/core/v1alpha1/zz_generated.deepcopy.go index 48f5e451113..044f28d46fb 100644 --- a/api/core/v1alpha1/zz_generated.deepcopy.go +++ b/api/core/v1alpha1/zz_generated.deepcopy.go @@ -4778,6 +4778,27 @@ func (in *TiProxy) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TiProxyExternalPDCluster) DeepCopyInto(out *TiProxyExternalPDCluster) { + *out = *in + if in.NSServers != nil { + in, out := &in.NSServers, &out.NSServers + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiProxyExternalPDCluster. +func (in *TiProxyExternalPDCluster) DeepCopy() *TiProxyExternalPDCluster { + if in == nil { + return nil + } + out := new(TiProxyExternalPDCluster) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TiProxyGroup) DeepCopyInto(out *TiProxyGroup) { *out = *in @@ -4935,13 +4956,55 @@ func (in *TiProxyList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TiProxyPortOrRange) DeepCopyInto(out *TiProxyPortOrRange) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } + if in.Range != nil { + in, out := &in.Range, &out.Range + *out = new(TiProxyPortRange) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiProxyPortOrRange. +func (in *TiProxyPortOrRange) DeepCopy() *TiProxyPortOrRange { + if in == nil { + return nil + } + out := new(TiProxyPortOrRange) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TiProxyPortRange) DeepCopyInto(out *TiProxyPortRange) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiProxyPortRange. 
+func (in *TiProxyPortRange) DeepCopy() *TiProxyPortRange { + if in == nil { + return nil + } + out := new(TiProxyPortRange) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TiProxyPorts) DeepCopyInto(out *TiProxyPorts) { *out = *in if in.Client != nil { in, out := &in.Client, &out.Client - *out = new(Port) - **out = **in + *out = new(TiProxyPortOrRange) + (*in).DeepCopyInto(*out) } if in.API != nil { in, out := &in.API, &out.API @@ -5182,6 +5245,13 @@ func (in *TiProxyTemplateSpec) DeepCopyInto(out *TiProxyTemplateSpec) { in.Probes.DeepCopyInto(&out.Probes) in.Resources.DeepCopyInto(&out.Resources) out.UpdateStrategy = in.UpdateStrategy + if in.ExternalPDClusters != nil { + in, out := &in.ExternalPDClusters, &out.ExternalPDClusters + *out = make([]TiProxyExternalPDCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.Security != nil { in, out := &in.Security, &out.Security *out = new(TiProxySecurity) diff --git a/manifests/crd/core.pingcap.com_tiproxies.yaml b/manifests/crd/core.pingcap.com_tiproxies.yaml index 3b5ef3248df..81c0b92179c 100644 --- a/manifests/crd/core.pingcap.com_tiproxies.yaml +++ b/manifests/crd/core.pingcap.com_tiproxies.yaml @@ -72,6 +72,43 @@ spec: config: description: Config defines config file of TiProxy. type: string + externalPDClusters: + description: |- + ExternalPDClusters defines additional backend PD clusters that TiProxy + should connect to besides the local cluster referenced by spec.cluster. + Operator will manage these clusters through TiProxy's reloadable + backend-clusters config and HTTP config API. + items: + properties: + name: + description: Name identifies the backend PD cluster inside TiProxy. 
+ minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + nsServers: + description: |- + NSServers defines the DNS servers TiProxy should use when resolving the + backend cluster endpoints. + items: + type: string + maxItems: 16 + type: array + x-kubernetes-list-type: set + pdAddrs: + description: |- + PDAddrs is a comma separated address list of PD servers, for example: + "pd-0.example.com:2379,pd-1.example.com:2379". + minLength: 1 + type: string + required: + - name + - pdAddrs + type: object + maxItems: 64 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map features: description: Features are enabled feature items: @@ -8538,11 +8575,34 @@ spec: description: Client defines port for TiProxy's SQL service. properties: port: + description: Port defines the main SQL port exposed by + the TiProxy Pod and Service. format: int32 + maximum: 65535 + minimum: 1 type: integer - required: - - port + range: + description: Range defines additional SQL ports listened + by TiProxy itself. + properties: + end: + format: int32 + maximum: 65535 + minimum: 1 + type: integer + start: + format: int32 + maximum: 65535 + minimum: 1 + type: integer + required: + - end + - start + type: object type: object + x-kubernetes-validations: + - message: range.start must be less than or equal to range.end + rule: '!has(self.range) || self.range.start <= self.range.end' peer: description: Peer defines port for TiProxy's peer service. properties: diff --git a/manifests/crd/core.pingcap.com_tiproxygroups.yaml b/manifests/crd/core.pingcap.com_tiproxygroups.yaml index e08590f7b7d..45b87abe84e 100644 --- a/manifests/crd/core.pingcap.com_tiproxygroups.yaml +++ b/manifests/crd/core.pingcap.com_tiproxygroups.yaml @@ -213,6 +213,44 @@ spec: config: description: Config defines config file of TiProxy. 
type: string + externalPDClusters: + description: |- + ExternalPDClusters defines additional backend PD clusters that TiProxy + should connect to besides the local cluster referenced by spec.cluster. + Operator will manage these clusters through TiProxy's reloadable + backend-clusters config and HTTP config API. + items: + properties: + name: + description: Name identifies the backend PD cluster + inside TiProxy. + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + nsServers: + description: |- + NSServers defines the DNS servers TiProxy should use when resolving the + backend cluster endpoints. + items: + type: string + maxItems: 16 + type: array + x-kubernetes-list-type: set + pdAddrs: + description: |- + PDAddrs is a comma separated address list of PD servers, for example: + "pd-0.example.com:2379,pd-1.example.com:2379". + minLength: 1 + type: string + required: + - name + - pdAddrs + type: object + maxItems: 64 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map image: description: |- Image is TiProxy's image @@ -8836,11 +8874,35 @@ spec: service. properties: port: + description: Port defines the main SQL port exposed + by the TiProxy Pod and Service. format: int32 + maximum: 65535 + minimum: 1 type: integer - required: - - port + range: + description: Range defines additional SQL ports + listened by TiProxy itself. + properties: + end: + format: int32 + maximum: 65535 + minimum: 1 + type: integer + start: + format: int32 + maximum: 65535 + minimum: 1 + type: integer + required: + - end + - start + type: object type: object + x-kubernetes-validations: + - message: range.start must be less than or equal + to range.end + rule: '!has(self.range) || self.range.start <= self.range.end' peer: description: Peer defines port for TiProxy's peer service. 
diff --git a/pkg/apiutil/core/v1alpha1/tiproxy.go b/pkg/apiutil/core/v1alpha1/tiproxy.go index b8fae44db0d..1a5de787982 100644 --- a/pkg/apiutil/core/v1alpha1/tiproxy.go +++ b/pkg/apiutil/core/v1alpha1/tiproxy.go @@ -23,7 +23,9 @@ import ( func TiProxyGroupClientPort(proxyg *v1alpha1.TiProxyGroup) int32 { if proxyg.Spec.Template.Spec.Server.Ports.Client != nil { - return proxyg.Spec.Template.Spec.Server.Ports.Client.Port + if proxyg.Spec.Template.Spec.Server.Ports.Client.Port != nil { + return *proxyg.Spec.Template.Spec.Server.Ports.Client.Port + } } return v1alpha1.DefaultTiProxyPortClient } @@ -44,11 +46,23 @@ func TiProxyGroupPeerPort(proxyg *v1alpha1.TiProxyGroup) int32 { func TiProxyClientPort(tiproxy *v1alpha1.TiProxy) int32 { if tiproxy.Spec.Server.Ports.Client != nil { - return tiproxy.Spec.Server.Ports.Client.Port + if tiproxy.Spec.Server.Ports.Client.Port != nil { + return *tiproxy.Spec.Server.Ports.Client.Port + } } return v1alpha1.DefaultTiProxyPortClient } +func TiProxyClientPortRange(tiproxy *v1alpha1.TiProxy) []int { + if tiproxy.Spec.Server.Ports.Client == nil || tiproxy.Spec.Server.Ports.Client.Range == nil { + return nil + } + return []int{ + int(tiproxy.Spec.Server.Ports.Client.Range.Start), + int(tiproxy.Spec.Server.Ports.Client.Range.End), + } +} + func TiProxyAPIPort(tiproxy *v1alpha1.TiProxy) int32 { if tiproxy.Spec.Server.Ports.API != nil { return tiproxy.Spec.Server.Ports.API.Port diff --git a/pkg/apiutil/core/v1alpha1/tiproxy_test.go b/pkg/apiutil/core/v1alpha1/tiproxy_test.go new file mode 100644 index 00000000000..f3f9110afba --- /dev/null +++ b/pkg/apiutil/core/v1alpha1/tiproxy_test.go @@ -0,0 +1,57 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package coreutil + +import ( + "testing" + + "github.com/stretchr/testify/require" + "k8s.io/utils/ptr" + + "github.com/pingcap/tidb-operator/api/v2/core/v1alpha1" +) + +func TestTiProxyClientPort(t *testing.T) { + tiproxy := &v1alpha1.TiProxy{} + require.Equal(t, int32(v1alpha1.DefaultTiProxyPortClient), TiProxyClientPort(tiproxy)) + + tiproxy.Spec.Server.Ports.Client = &v1alpha1.TiProxyPortOrRange{ + Range: &v1alpha1.TiProxyPortRange{ + Start: 10000, + End: 10002, + }, + } + require.Equal(t, int32(v1alpha1.DefaultTiProxyPortClient), TiProxyClientPort(tiproxy)) + require.Equal(t, []int{10000, 10002}, TiProxyClientPortRange(tiproxy)) + + tiproxy.Spec.Server.Ports.Client.Port = ptr.To[int32](7000) + require.Equal(t, int32(7000), TiProxyClientPort(tiproxy)) +} + +func TestTiProxyGroupClientPort(t *testing.T) { + proxyg := &v1alpha1.TiProxyGroup{} + require.Equal(t, int32(v1alpha1.DefaultTiProxyPortClient), TiProxyGroupClientPort(proxyg)) + + proxyg.Spec.Template.Spec.Server.Ports.Client = &v1alpha1.TiProxyPortOrRange{ + Range: &v1alpha1.TiProxyPortRange{ + Start: 10000, + End: 10002, + }, + } + require.Equal(t, int32(v1alpha1.DefaultTiProxyPortClient), TiProxyGroupClientPort(proxyg)) + + proxyg.Spec.Template.Spec.Server.Ports.Client.Port = ptr.To[int32](7000) + require.Equal(t, int32(7000), TiProxyGroupClientPort(proxyg)) +} diff --git a/pkg/configs/tiproxy/config.go b/pkg/configs/tiproxy/config.go index a1bcbb11fc5..7d62a3eb687 100644 --- a/pkg/configs/tiproxy/config.go +++ b/pkg/configs/tiproxy/config.go @@ -17,6 +17,7 @@ package tidb import ( 
"fmt" "path" + "strings" corev1 "k8s.io/api/core/v1" @@ -26,14 +27,18 @@ import ( stringutil "github.com/pingcap/tidb-operator/v2/pkg/utils/string" ) +const DefaultBackendClusterName = "default" + type Proxy struct { - Address string `toml:"addr"` - AdvertiseAddress string `toml:"advertise-addr"` - PDAddress string `toml:"pd-addrs"` + Address string `toml:"addr,omitempty" json:"addr,omitempty"` + AdvertiseAddress string `toml:"advertise-addr,omitempty" json:"advertise-addr,omitempty"` + PDAddress string `toml:"pd-addrs,omitempty" json:"pd-addrs,omitempty"` + PortRange []int `toml:"port-range,omitempty" json:"port-range,omitempty"` + BackendClusters []BackendCluster `toml:"backend-clusters,omitempty" json:"backend-clusters,omitempty"` } type API struct { - Address string `toml:"addr"` + Address string `toml:"addr,omitempty" json:"addr,omitempty"` } type TLSConfig struct { @@ -54,6 +59,12 @@ type Security struct { SQLTLS TLSConfig `toml:"sql-tls,omitempty"` } +type BackendCluster struct { + Name string `toml:"name,omitempty" json:"name,omitempty"` + PDAddrs string `toml:"pd-addrs,omitempty" json:"pd-addrs,omitempty"` + NSServers []string `toml:"ns-servers,omitempty" json:"ns-servers,omitempty"` +} + // Config is a subset config of TiProxy. // Only TiDB Operator managed fields are defined here. 
// ref: https://docs.pingcap.com/tidb/stable/tiproxy-configuration/ @@ -73,6 +84,12 @@ func (c *Config) Overlay(cluster *v1alpha1.Cluster, tiproxy *v1alpha1.TiProxy) e c.Proxy.Address = coreutil.ListenAddress(coreutil.TiProxyClientPort(tiproxy)) c.Proxy.AdvertiseAddress = getAdvertiseAddress(cluster, tiproxy) c.Proxy.PDAddress = stringutil.RemoveHTTPPrefix(cluster.Status.PD) + c.Proxy.PortRange = coreutil.TiProxyClientPortRange(tiproxy) + backendClusters, err := desiredBackendClusters(cluster, tiproxy) + if err != nil { + return err + } + c.Proxy.BackendClusters = backendClusters c.API.Address = coreutil.ListenAddress(coreutil.TiProxyAPIPort(tiproxy)) if coreutil.IsTLSClusterEnabled(cluster) { @@ -128,6 +145,14 @@ func (c *Config) Validate() error { fields = append(fields, "proxy.pd-address") } + if len(c.Proxy.PortRange) > 0 { + fields = append(fields, "proxy.port-range") + } + + if len(c.Proxy.BackendClusters) > 0 { + fields = append(fields, "proxy.backend-clusters") + } + if c.API.Address != "" { fields = append(fields, "api.address") } @@ -195,3 +220,42 @@ func getAdvertiseAddress(cluster *v1alpha1.Cluster, tiproxy *v1alpha1.TiProxy) s } return host } + +func desiredBackendClusters(cluster *v1alpha1.Cluster, tiproxy *v1alpha1.TiProxy) ([]BackendCluster, error) { + external := tiproxy.Spec.ExternalPDClusters + if len(external) == 0 { + return nil, nil + } + + clusters := make([]BackendCluster, 0, len(external)+1) + localPDAddrs := stringutil.RemoveHTTPPrefix(cluster.Status.PD) + seen := map[string]struct{}{} + if localPDAddrs != "" { + clusters = append(clusters, BackendCluster{ + Name: DefaultBackendClusterName, + PDAddrs: localPDAddrs, + }) + seen[DefaultBackendClusterName] = struct{}{} + } + for i := range external { + item := external[i] + name := strings.TrimSpace(item.Name) + if name == "" { + return nil, fmt.Errorf("externalPDClusters[%d].name is empty", i) + } + if _, ok := seen[name]; ok { + return nil, fmt.Errorf("externalPDClusters[%d].name conflicts 
with existing backend cluster name %q", i, name) + } + seen[name] = struct{}{} + pdAddrs := strings.TrimSpace(item.PDAddrs) + if pdAddrs == "" { + return nil, fmt.Errorf("externalPDClusters[%d].pdAddrs is empty", i) + } + clusters = append(clusters, BackendCluster{ + Name: name, + PDAddrs: pdAddrs, + NSServers: append([]string(nil), item.NSServers...), + }) + } + return clusters, nil +} diff --git a/pkg/configs/tiproxy/config_test.go b/pkg/configs/tiproxy/config_test.go index 64c27fd3bac..0677a886e06 100644 --- a/pkg/configs/tiproxy/config_test.go +++ b/pkg/configs/tiproxy/config_test.go @@ -20,6 +20,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" "github.com/pingcap/tidb-operator/api/v2/core/v1alpha1" ) @@ -43,6 +44,11 @@ func TestValidate(t *testing.T) { Address: "[::]:4000", AdvertiseAddress: "tiproxy-0.tiproxy-peer.default.svc", PDAddress: "pd:2379", + PortRange: []int{10000, 10002}, + BackendClusters: []BackendCluster{{ + Name: "cluster-a", + PDAddrs: "127.0.0.1:2379", + }}, }, API: API{ Address: "[::]:3080", @@ -60,6 +66,8 @@ func TestValidate(t *testing.T) { "proxy.address", "proxy.advertise-address", "proxy.pd-address", + "proxy.port-range", + "proxy.backend-clusters", "api.address", "security.server-sql-tls.cert", "security.server-sql-tls.key", @@ -123,6 +131,80 @@ func TestOverlay(t *testing.T) { }, wantErr: false, }, + { + name: "basic config without local pd", + cluster: &v1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns1", + Name: "db", + }, + }, + tiproxy: &v1alpha1.TiProxy{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns1", + Name: "db-foo", + }, + Spec: v1alpha1.TiProxySpec{ + Subdomain: "db-tiproxy-peer", + }, + }, + want: &Config{ + Proxy: Proxy{ + Address: "[::]:6000", + AdvertiseAddress: "db-tiproxy-foo.db-tiproxy-peer.ns1.svc", + }, + API: API{ + Address: "[::]:3080", + }, + }, + wantErr: false, + }, + { + name: 
"config with client port range", + cluster: &v1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns1", + Name: "db", + }, + Status: v1alpha1.ClusterStatus{ + PD: "http://db-pd.ns1:2379", + }, + }, + tiproxy: &v1alpha1.TiProxy{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns1", + Name: "db-foo", + }, + Spec: v1alpha1.TiProxySpec{ + Subdomain: "tiproxy-peer", + TiProxyTemplateSpec: v1alpha1.TiProxyTemplateSpec{ + Server: v1alpha1.TiProxyServer{ + Ports: v1alpha1.TiProxyPorts{ + Client: &v1alpha1.TiProxyPortOrRange{ + Port: ptr.To[int32](7000), + Range: &v1alpha1.TiProxyPortRange{ + Start: 10000, + End: 10002, + }, + }, + }, + }, + }, + }, + }, + want: &Config{ + Proxy: Proxy{ + Address: "[::]:7000", + AdvertiseAddress: "db-tiproxy-foo.tiproxy-peer.ns1.svc", + PDAddress: "db-pd.ns1:2379", + PortRange: []int{10000, 10002}, + }, + API: API{ + Address: "[::]:3080", + }, + }, + wantErr: false, + }, { name: "config with cluster TLS enabled", cluster: &v1alpha1.Cluster{ @@ -269,6 +351,132 @@ func TestOverlay(t *testing.T) { }, wantErr: false, }, + { + name: "config with external pd clusters", + cluster: &v1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns1", + Name: "db", + }, + Status: v1alpha1.ClusterStatus{ + PD: "http://db-pd.ns1:2379", + }, + }, + tiproxy: &v1alpha1.TiProxy{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns1", + Name: "db-foo", + }, + Spec: v1alpha1.TiProxySpec{ + Subdomain: "tiproxy-peer", + TiProxyTemplateSpec: v1alpha1.TiProxyTemplateSpec{ + ExternalPDClusters: []v1alpha1.TiProxyExternalPDCluster{ + { + Name: "cluster-a", + PDAddrs: "127.0.0.2:2379", + NSServers: []string{"10.0.0.1", "10.0.0.2:53"}, + }, + }, + }, + }, + }, + want: &Config{ + Proxy: Proxy{ + Address: "[::]:6000", + AdvertiseAddress: "db-tiproxy-foo.tiproxy-peer.ns1.svc", + PDAddress: "db-pd.ns1:2379", + BackendClusters: []BackendCluster{ + { + Name: DefaultBackendClusterName, + PDAddrs: "db-pd.ns1:2379", + }, + { + Name: "cluster-a", + PDAddrs: 
"127.0.0.2:2379", + NSServers: []string{"10.0.0.1", "10.0.0.2:53"}, + }, + }, + }, + API: API{ + Address: "[::]:3080", + }, + }, + wantErr: false, + }, + { + name: "config with external pd clusters and no local pd", + cluster: &v1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns1", + Name: "db", + }, + }, + tiproxy: &v1alpha1.TiProxy{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns1", + Name: "db-foo", + }, + Spec: v1alpha1.TiProxySpec{ + Subdomain: "tiproxy-peer", + TiProxyTemplateSpec: v1alpha1.TiProxyTemplateSpec{ + ExternalPDClusters: []v1alpha1.TiProxyExternalPDCluster{ + { + Name: "cluster-a", + PDAddrs: "127.0.0.2:2379", + NSServers: []string{"10.0.0.1", "10.0.0.2:53"}, + }, + }, + }, + }, + }, + want: &Config{ + Proxy: Proxy{ + Address: "[::]:6000", + AdvertiseAddress: "db-tiproxy-foo.tiproxy-peer.ns1.svc", + BackendClusters: []BackendCluster{ + { + Name: "cluster-a", + PDAddrs: "127.0.0.2:2379", + NSServers: []string{"10.0.0.1", "10.0.0.2:53"}, + }, + }, + }, + API: API{ + Address: "[::]:3080", + }, + }, + wantErr: false, + }, + { + name: "config with external pd cluster default name conflict", + cluster: &v1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns1", + Name: "db", + }, + Status: v1alpha1.ClusterStatus{ + PD: "http://db-pd.ns1:2379", + }, + }, + tiproxy: &v1alpha1.TiProxy{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns1", + Name: "db-foo", + }, + Spec: v1alpha1.TiProxySpec{ + Subdomain: "tiproxy-peer", + TiProxyTemplateSpec: v1alpha1.TiProxyTemplateSpec{ + ExternalPDClusters: []v1alpha1.TiProxyExternalPDCluster{ + { + Name: DefaultBackendClusterName, + PDAddrs: "127.0.0.2:2379", + }, + }, + }, + }, + }, + wantErr: true, + }, } for _, tt := range tests { diff --git a/pkg/controllers/tiproxy/builder.go b/pkg/controllers/tiproxy/builder.go index 106219be402..eb68f104fe4 100644 --- a/pkg/controllers/tiproxy/builder.go +++ b/pkg/controllers/tiproxy/builder.go @@ -39,9 +39,6 @@ func (r *Reconciler) NewRunner(state 
*tasks.ReconcileContext, reporter task.Task task.IfBreak(common.CondClusterIsDeleting(state), common.TaskInstanceFinalizerDel[scope.TiProxy](state, r.Client, common.DefaultInstanceSubresourceLister), ), - // return if cluster's status is not updated - task.IfBreak(common.CondClusterPDAddrIsNotRegistered(state)), - task.IfBreak(common.CondObjectIsDeleting[scope.TiProxy](state), common.TaskInstanceFinalizerDel[scope.TiProxy](state, r.Client, common.DefaultInstanceSubresourceLister), // TODO(liubo02): if the finalizer has been removed, no need to update status @@ -68,10 +65,12 @@ func (r *Reconciler) NewRunner(state *tasks.ReconcileContext, reporter task.Task tasks.TaskConfigMap(state, r.Client), common.TaskPVC[scope.TiProxy](state, r.Client, r.VolumeModifierFactory, tasks.PVCNewer()), tasks.TaskPod(state, r.Client), - common.TaskServerLabels[scope.TiProxy](state, r.Client, r.PDClientManager, func(ctx context.Context, labels map[string]string) error { - // TODO(liubo02): compare before setting - return state.TiProxyClient.SetLabels(ctx, labels) - }), + task.IfNot(common.CondClusterPDAddrIsNotRegistered(state), + common.TaskServerLabels[scope.TiProxy](state, r.Client, r.PDClientManager, func(ctx context.Context, labels map[string]string) error { + // TODO(liubo02): compare before setting + return state.TiProxyClient.SetLabels(ctx, labels) + }), + ), common.TaskInstanceConditionSynced[scope.TiProxy](state), common.TaskInstanceConditionReady[scope.TiProxy](state), common.TaskInstanceConditionRunning[scope.TiProxy](state), diff --git a/pkg/controllers/tiproxy/tasks/cm_test.go b/pkg/controllers/tiproxy/tasks/cm_test.go index 0bdf332a385..141599e6c91 100644 --- a/pkg/controllers/tiproxy/tasks/cm_test.go +++ b/pkg/controllers/tiproxy/tasks/cm_test.go @@ -45,6 +45,16 @@ func TestTaskConfigMap(t *testing.T) { }{ { desc: "no config", + state: &ReconcileContext{ + State: &state{ + tiproxy: fake.FakeObj[v1alpha1.TiProxy]("aaa-xxx"), + cluster: 
fake.FakeObj[v1alpha1.Cluster]("cluster"), + }, + }, + expectedStatus: task.SComplete, + }, + { + desc: "no config with local pd", state: &ReconcileContext{ State: &state{ tiproxy: fake.FakeObj[v1alpha1.TiProxy]("aaa-xxx"), @@ -110,6 +120,24 @@ func TestTaskConfigMap(t *testing.T) { }, expectedStatus: task.SComplete, }, + { + desc: "no local pd with external pd clusters", + state: &ReconcileContext{ + State: &state{ + tiproxy: fake.FakeObj("aaa-xxx", func(obj *v1alpha1.TiProxy) *v1alpha1.TiProxy { + obj.Spec.ExternalPDClusters = []v1alpha1.TiProxyExternalPDCluster{ + { + Name: "cluster-a", + PDAddrs: "127.0.0.1:2379", + }, + } + return obj + }), + cluster: fake.FakeObj[v1alpha1.Cluster]("cluster"), + }, + }, + expectedStatus: task.SComplete, + }, { desc: "update config map failed", state: &ReconcileContext{ diff --git a/pkg/controllers/tiproxy/tasks/pod_test.go b/pkg/controllers/tiproxy/tasks/pod_test.go index 5bd84bc9071..922dae2a67e 100644 --- a/pkg/controllers/tiproxy/tasks/pod_test.go +++ b/pkg/controllers/tiproxy/tasks/pod_test.go @@ -172,6 +172,66 @@ func TestTaskPod(t *testing.T) { expectedPodIsTerminating: true, expectedStatus: task.SWait, }, + { + desc: "external pd clusters changed, hot reload policy", + state: &ReconcileContext{ + State: &state{ + tiproxy: fake.FakeObj("aaa-xxx", func(obj *v1alpha1.TiProxy) *v1alpha1.TiProxy { + obj.Spec.Version = fakeVersion + obj.Spec.UpdateStrategy.Config = v1alpha1.ConfigUpdateStrategyHotReload + obj.Spec.ExternalPDClusters = []v1alpha1.TiProxyExternalPDCluster{ + { + Name: "cluster-a", + PDAddrs: "127.0.0.1:2379", + }, + } + return obj + }), + cluster: fake.FakeObj[v1alpha1.Cluster]("aaa"), + pod: fakePod( + fake.FakeObj[v1alpha1.Cluster]("aaa"), + fake.FakeObj("aaa-xxx", func(obj *v1alpha1.TiProxy) *v1alpha1.TiProxy { + obj.Spec.Version = fakeVersion + obj.Spec.UpdateStrategy.Config = v1alpha1.ConfigUpdateStrategyHotReload + return obj + }), + ), + }, + }, + + expectUpdatedPod: true, + expectedStatus: 
task.SComplete,
+		},
+		{
+			desc: "external pd clusters changed, restart policy",
+			state: &ReconcileContext{
+				State: &state{
+					tiproxy: fake.FakeObj("aaa-xxx", func(obj *v1alpha1.TiProxy) *v1alpha1.TiProxy {
+						obj.Spec.Version = fakeVersion
+						obj.Spec.UpdateStrategy.Config = v1alpha1.ConfigUpdateStrategyRestart
+						obj.Spec.ExternalPDClusters = []v1alpha1.TiProxyExternalPDCluster{
+							{
+								Name:    "cluster-a",
+								PDAddrs: "127.0.0.1:2379",
+							},
+						}
+						return obj
+					}),
+					cluster: fake.FakeObj[v1alpha1.Cluster]("aaa"),
+					pod: fakePod(
+						fake.FakeObj[v1alpha1.Cluster]("aaa"),
+						fake.FakeObj("aaa-xxx", func(obj *v1alpha1.TiProxy) *v1alpha1.TiProxy {
+							obj.Spec.Version = fakeVersion
+							obj.Spec.UpdateStrategy.Config = v1alpha1.ConfigUpdateStrategyRestart
+							return obj
+						}),
+					),
+				},
+			},
+
+			expectUpdatedPod: true,
+			expectedStatus:   task.SComplete,
+		},
 		{
 			desc: "pod labels changed, config not changed",
 			state: &ReconcileContext{
diff --git a/pkg/reloadable/tiproxy.go b/pkg/reloadable/tiproxy.go
index 721da820b67..460dd89d2fb 100644
--- a/pkg/reloadable/tiproxy.go
+++ b/pkg/reloadable/tiproxy.go
@@ -112,13 +112,20 @@ func equalTiProxyTemplate(c, p *v1alpha1.TiProxyTemplate) bool {
 	p = convertTiProxyTemplate(p)
 	c = convertTiProxyTemplate(c)
 	// not equal only when current strategy is Restart and config is changed
-	if c.Spec.UpdateStrategy.Config == v1alpha1.ConfigUpdateStrategyRestart && p.Spec.Config != c.Spec.Config {
+	if c.Spec.UpdateStrategy.Config == v1alpha1.ConfigUpdateStrategyRestart &&
+		p.Spec.Config != c.Spec.Config {
 		return false
 	}
 
 	// ignore these fields
 	p.Spec.Config = ""
 	c.Spec.Config = ""
+	// ExternalPDClusters are managed through TiProxy's reloadable
+	// backend-clusters config and HTTP config API, so a change to them alone
+	// must not make the templates compare as unequal here.
+	// NOTE(review): assumes convertTiProxyTemplate folds them into Spec.Config
+	// so the Restart-policy check above still sees the change -- confirm.
+	p.Spec.ExternalPDClusters = nil
+	c.Spec.ExternalPDClusters = nil
 	p.Spec.UpdateStrategy.Config = ""
 	c.Spec.UpdateStrategy.Config = ""
 
diff --git a/tests/e2e/data/tiproxy.go b/tests/e2e/data/tiproxy.go
index 3cb38ec244d..e694335b4a4 100644
--- a/tests/e2e/data/tiproxy.go
+++ b/tests/e2e/data/tiproxy.go
@@ -99,6 +99,14 @@ func WithHotReloadPolicyForTiProxy() GroupPatch[*v1alpha1.TiProxyGroup] {
 	})
 }
 
+// WithTiProxyExternalPDClusters sets the external backend PD clusters on the
+// TiProxyGroup's template spec.
+func WithTiProxyExternalPDClusters(clusters []v1alpha1.TiProxyExternalPDCluster) GroupPatch[*v1alpha1.TiProxyGroup] {
+	return GroupPatchFunc[*v1alpha1.TiProxyGroup](func(obj *v1alpha1.TiProxyGroup) {
+		obj.Spec.Template.Spec.ExternalPDClusters = clusters
+	})
+}
+
 func WithTiProxyMaxSurge(maxSurge int32) GroupPatch[*v1alpha1.TiProxyGroup] {
 	return GroupPatchFunc[*v1alpha1.TiProxyGroup](func(obj *v1alpha1.TiProxyGroup) {
 		obj.Spec.MaxSurge = ptr.To(maxSurge)
diff --git a/tests/e2e/tiproxy/tiproxy.go b/tests/e2e/tiproxy/tiproxy.go
index 9ae5c2cd603..647ae455a33 100644
--- a/tests/e2e/tiproxy/tiproxy.go
+++ b/tests/e2e/tiproxy/tiproxy.go
@@ -17,18 +17,25 @@ package tiproxy
 import (
 	"context"
 	"fmt"
+	"reflect"
+
 	"github.com/BurntSushi/toml"
 	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
 	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/utils/ptr"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	"github.com/pingcap/tidb-operator/api/v2/core/v1alpha1"
+	tiproxycfg "github.com/pingcap/tidb-operator/v2/pkg/configs/tiproxy"
 	"github.com/pingcap/tidb-operator/v2/pkg/runtime"
 	"github.com/pingcap/tidb-operator/v2/pkg/runtime/scope"
 	"github.com/pingcap/tidb-operator/v2/tests/e2e/data"
 	"github.com/pingcap/tidb-operator/v2/tests/e2e/framework"
+	"github.com/pingcap/tidb-operator/v2/tests/e2e/framework/action"
+	"github.com/pingcap/tidb-operator/v2/tests/e2e/framework/desc"
 	wopt "github.com/pingcap/tidb-operator/v2/tests/e2e/framework/workload"
 	"github.com/pingcap/tidb-operator/v2/tests/e2e/label"
 	"github.com/pingcap/tidb-operator/v2/tests/e2e/utils/cert"
@@ -216,6 +223,126 @@ var _ = ginkgo.Describe("TiProxy", label.TiProxy, func() {
 			f.Must(waiter.WaitForPodsRecreated(ctx, f.Client, runtime.FromTiProxyGroup(proxyg), *changeTime, waiter.LongTaskTimeout))
 			f.WaitForTiProxyGroupReady(ctx, proxyg)
 		})
+
+		ginkgo.It("support external backend clusters without local pd", label.P1, label.FeatureHotReload, func(ctx context.Context) {
+			proxyg := action.MustCreateTiProxy(
+				ctx,
+				f,
+				desc.DefaultOptions(),
+				data.WithReplicas[scope.TiProxyGroup](1),
+				data.WithHotReloadPolicyForTiProxy(),
+				data.WithTiProxyExternalPDClusters([]v1alpha1.TiProxyExternalPDCluster{
+					{
+						Name:      "cluster-a",
+						PDAddrs:   "127.0.0.11:2379",
+						NSServers: []string{"10.0.0.1", "10.0.0.2:53"},
+					},
+				}),
+			)
+
+			gomega.Eventually(func() error {
+				proxies := &v1alpha1.TiProxyList{}
+				if err := f.Client.List(ctx, proxies, client.InNamespace(proxyg.Namespace), client.MatchingLabels{
+					v1alpha1.LabelKeyCluster:   proxyg.Spec.Cluster.Name,
+					v1alpha1.LabelKeyGroup:     proxyg.Name,
+					v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiProxy,
+				}); err != nil {
+					return err
+				}
+				if len(proxies.Items) != 1 {
+					return fmt.Errorf("expected 1 TiProxy, got %d", len(proxies.Items))
+				}
+				cond := meta.FindStatusCondition(proxies.Items[0].Status.Conditions, v1alpha1.CondSynced)
+				if cond == nil || cond.Status != metav1.ConditionTrue {
+					return fmt.Errorf("TiProxy is not synced: %#v", cond)
+				}
+				return nil
+			}).WithTimeout(waiter.LongTaskTimeout).WithPolling(waiter.Poll).Should(gomega.Succeed())
+
+			gomega.Eventually(func() error {
+				cms := &corev1.ConfigMapList{}
+				if err := f.Client.List(ctx, cms, client.InNamespace(proxyg.Namespace), client.MatchingLabels{
+					v1alpha1.LabelKeyCluster:   proxyg.Spec.Cluster.Name,
+					v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiProxy,
+				}); err != nil {
+					return err
+				}
+				if len(cms.Items) != 1 {
+					return fmt.Errorf("expected 1 TiProxy ConfigMap, got %d", len(cms.Items))
+				}
+
+				var cfg tiproxycfg.Config
+				if err := toml.Unmarshal([]byte(cms.Items[0].Data[v1alpha1.FileNameConfig]), &cfg); err != nil {
+					return err
+				}
+				if cfg.Proxy.PDAddress != "" {
+					return fmt.Errorf("expected empty proxy.pd-addrs, got %q", cfg.Proxy.PDAddress)
+				}
+				expected := []tiproxycfg.BackendCluster{
+					{
+						Name:      "cluster-a",
+						PDAddrs:   "127.0.0.11:2379",
+						NSServers: []string{"10.0.0.1", "10.0.0.2:53"},
+					},
+				}
+				if !reflect.DeepEqual(expected, cfg.Proxy.BackendClusters) {
+					return fmt.Errorf("unexpected backend clusters: %#v", cfg.Proxy.BackendClusters)
+				}
+				return nil
+			}).WithTimeout(waiter.LongTaskTimeout).WithPolling(waiter.Poll).Should(gomega.Succeed())
+		})
+
+		ginkgo.It("support tiproxy without any pd configured", label.P1, label.FeatureHotReload, func(ctx context.Context) {
+			proxyg := f.MustCreateTiProxy(
+				ctx,
+				data.WithReplicas[scope.TiProxyGroup](1),
+				data.WithHotReloadPolicyForTiProxy(),
+			)
+
+			gomega.Eventually(func() error {
+				proxies := &v1alpha1.TiProxyList{}
+				if err := f.Client.List(ctx, proxies, client.InNamespace(proxyg.Namespace), client.MatchingLabels{
+					v1alpha1.LabelKeyCluster:   proxyg.Spec.Cluster.Name,
+					v1alpha1.LabelKeyGroup:     proxyg.Name,
+					v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiProxy,
+				}); err != nil {
+					return err
+				}
+				if len(proxies.Items) != 1 {
+					return fmt.Errorf("expected 1 TiProxy, got %d", len(proxies.Items))
+				}
+				cond := meta.FindStatusCondition(proxies.Items[0].Status.Conditions, v1alpha1.CondSynced)
+				if cond == nil || cond.Status != metav1.ConditionTrue {
+					return fmt.Errorf("TiProxy is not synced: %#v", cond)
+				}
+				return nil
+			}).WithTimeout(waiter.LongTaskTimeout).WithPolling(waiter.Poll).Should(gomega.Succeed())
+
+			gomega.Eventually(func() error {
+				cms := &corev1.ConfigMapList{}
+				if err := f.Client.List(ctx, cms, client.InNamespace(proxyg.Namespace), client.MatchingLabels{
+					v1alpha1.LabelKeyCluster:   proxyg.Spec.Cluster.Name,
+					v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiProxy,
+				}); err != nil {
+					return err
+				}
+				if len(cms.Items) != 1 {
+					return fmt.Errorf("expected 1 TiProxy ConfigMap, got %d", len(cms.Items))
+				}
+
+				var cfg tiproxycfg.Config
+				if err := toml.Unmarshal([]byte(cms.Items[0].Data[v1alpha1.FileNameConfig]), &cfg); err != nil {
+					return err
+				}
+				if cfg.Proxy.PDAddress != "" {
+					return fmt.Errorf("expected empty proxy.pd-addrs, got %q", cfg.Proxy.PDAddress)
+				}
+				if len(cfg.Proxy.BackendClusters) != 0 {
+					return fmt.Errorf("expected no backend clusters, got %#v", cfg.Proxy.BackendClusters)
+				}
+				return nil
+			}).WithTimeout(waiter.LongTaskTimeout).WithPolling(waiter.Poll).Should(gomega.Succeed())
+		})
 	})
 
 	ginkgo.Context("TLS", label.P0, label.FeatureTLS, func() {