From e88e0aff9f6b3c0286a3a107036d48c75d002d70 Mon Sep 17 00:00:00 2001
From: Dmitriy Alekseev <1865999+dragoangel@users.noreply.github.com>
Date: Mon, 26 Aug 2024 12:17:59 +0200
Subject: [PATCH] feat(cluster): Add replication slots configuration

Signed-off-by: Dmitriy Alekseev <1865999+dragoangel@users.noreply.github.com>
---
 charts/cluster/README.md                       |  1 +
 charts/cluster/templates/cluster.yaml          |  4 ++++
 ..._default_configuration_cluster-assert.yaml  |  5 +++++
 .../01-non_default_configuration_cluster.yaml  |  5 +++++
 charts/cluster/values.schema.json              | 19 +++++++++++++++++++
 charts/cluster/values.yaml                     |  9 +++++++++
 6 files changed, 43 insertions(+)

diff --git a/charts/cluster/README.md b/charts/cluster/README.md
index b8afdc5ef..f1e774b73 100644
--- a/charts/cluster/README.md
+++ b/charts/cluster/README.md
@@ -170,6 +170,7 @@ refer to the [CloudNativePG Documentation](https://cloudnative-pg.io/documentat
 | cluster.primaryUpdateMethod | string | `"switchover"` | Method to follow to upgrade the primary server during a rolling update procedure, after all replicas have been successfully updated. It can be switchover (default) or restart. |
 | cluster.primaryUpdateStrategy | string | `"unsupervised"` | Strategy to follow to upgrade the primary server during a rolling update procedure, after all replicas have been successfully updated: it can be automated (unsupervised - default) or manual (supervised) |
 | cluster.priorityClassName | string | `""` |  |
+| cluster.replicationSlots | object | `{"highAvailability":{"enabled":true,"slotPrefix":"_cnpg_"},"updateInterval":30}` | This feature automatically manages physical replication slots for each hot standby replica in the High Availability cluster, both in the primary and the standby. See: https://cloudnative-pg.io/documentation/current/replication/#replication-slots-for-high-availability |
 | cluster.resources | object | `{}` | Resources requirements of every generated Pod. Please refer to https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ for more information. We strongly advise you use the same setting for limits and requests so that your cluster pods are given a Guaranteed QoS. See: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/ |
 | cluster.roles | list | `[]` | This feature enables declarative management of existing roles, as well as the creation of new roles if they are not already present in the database. See: https://cloudnative-pg.io/documentation/current/declarative_role_management/ |
 | cluster.storage.size | string | `"8Gi"` |  |
diff --git a/charts/cluster/templates/cluster.yaml b/charts/cluster/templates/cluster.yaml
index 2dace8e34..22cc073fa 100644
--- a/charts/cluster/templates/cluster.yaml
+++ b/charts/cluster/templates/cluster.yaml
@@ -51,6 +51,10 @@ spec:
   superuserSecret:
     name: {{ . }}
   {{ end }}
+  {{- with .Values.cluster.replicationSlots }}
+  replicationSlots:
+    {{- toYaml . | nindent 4 }}
+  {{ end }}
   postgresql:
     shared_preload_libraries:
       {{- if eq .Values.type "timescaledb" }}
diff --git a/charts/cluster/test/postgresql-cluster-configuration/01-non_default_configuration_cluster-assert.yaml b/charts/cluster/test/postgresql-cluster-configuration/01-non_default_configuration_cluster-assert.yaml
index 921d354aa..7a8a7eb8b 100644
--- a/charts/cluster/test/postgresql-cluster-configuration/01-non_default_configuration_cluster-assert.yaml
+++ b/charts/cluster/test/postgresql-cluster-configuration/01-non_default_configuration_cluster-assert.yaml
@@ -29,6 +29,11 @@ spec:
         - CREATE TABLE mytable (id serial PRIMARY KEY, name VARCHAR(255));
   superuserSecret:
     name: supersecret-secret
+  replicationSlots:
+    highAvailability:
+      enabled: false
+      slotPrefix: _cnpg_cust_
+    updateInterval: 60
   enableSuperuserAccess: true
   certificates:
     serverCASecret: ca-secret
diff --git a/charts/cluster/test/postgresql-cluster-configuration/01-non_default_configuration_cluster.yaml b/charts/cluster/test/postgresql-cluster-configuration/01-non_default_configuration_cluster.yaml
index a68ef8df2..31196ce75 100644
--- a/charts/cluster/test/postgresql-cluster-configuration/01-non_default_configuration_cluster.yaml
+++ b/charts/cluster/test/postgresql-cluster-configuration/01-non_default_configuration_cluster.yaml
@@ -42,6 +42,11 @@ cluster:
     clientCASecret: client-ca-secret
   enableSuperuserAccess: true
   superuserSecret: supersecret-secret
+  replicationSlots:
+    highAvailability:
+      enabled: false
+      slotPrefix: _cnpg_cust_
+    updateInterval: 60
   roles:
     - name: dante
       ensure: present
diff --git a/charts/cluster/values.schema.json b/charts/cluster/values.schema.json
index a2354a8f8..5731e8d4d 100644
--- a/charts/cluster/values.schema.json
+++ b/charts/cluster/values.schema.json
@@ -253,6 +253,25 @@
         "priorityClassName": {
          "type": "string"
        },
+        "replicationSlots": {
+          "type": "object",
+          "properties": {
+            "highAvailability": {
+              "type": "object",
+              "properties": {
+                "enabled": {
+                  "type": "boolean"
+                },
+                "slotPrefix": {
+                  "type": "string"
+                }
+              }
+            },
+            "updateInterval": {
+              "type": "integer"
+            }
+          }
+        },
         "resources": {
           "type": "object"
         },
diff --git a/charts/cluster/values.yaml b/charts/cluster/values.yaml
index 7aa766d0f..3eb674933 100644
--- a/charts/cluster/values.yaml
+++ b/charts/cluster/values.yaml
@@ -153,6 +153,15 @@ cluster:
   enableSuperuserAccess: true
   superuserSecret: ""
 
+  # -- This feature automatically manages physical replication slots for each hot standby replica in
+  # the High Availability cluster, both in the primary and the standby.
+  # See: https://cloudnative-pg.io/documentation/current/replication/#replication-slots-for-high-availability
+  replicationSlots:
+    highAvailability:
+      enabled: true
+      slotPrefix: _cnpg_
+    updateInterval: 30
+
   # -- This feature enables declarative management of existing roles, as well as the creation of new roles if they are not
   # already present in the database.
   # See: https://cloudnative-pg.io/documentation/current/declarative_role_management/
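
For reference, a minimal sketch of how a chart user could exercise the new `cluster.replicationSlots` block once this patch is applied. The keys mirror the defaults added to `charts/cluster/values.yaml` above; the file name, release name, and the specific override values are illustrative assumptions, not part of the patch.

```yaml
# my-values.yaml -- hypothetical override file for the cluster chart.
# Only the keys introduced by this patch are shown; everything else keeps chart defaults.
cluster:
  replicationSlots:
    highAvailability:
      enabled: true            # manage one physical replication slot per hot standby replica
      slotPrefix: _cnpg_demo_  # assumed custom slot prefix; the chart default is _cnpg_
    updateInterval: 60         # seconds between slot updates; integer, per values.schema.json
```

Assuming the chart is installed from the CloudNativePG Helm repository under the alias `cnpg`, this could be applied with something like `helm upgrade --install mydb cnpg/cluster -f my-values.yaml`; the rendered Cluster resource then carries the block under `spec.replicationSlots` via the `toYaml | nindent 4` added to `templates/cluster.yaml`.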