diff --git a/.github/workflows/operator-integration.yml b/.github/workflows/operator-integration.yml index a6dd62ff..0d9da317 100644 --- a/.github/workflows/operator-integration.yml +++ b/.github/workflows/operator-integration.yml @@ -54,7 +54,7 @@ jobs: cd charts/apache-shardingsphere-operator-charts/ helm dependency build cd ../ - helm install shardingsphere-operator apache-shardingsphere-operator-charts -n shardingsphere-operator --set replicaCount=1 --set operator.featureGates.computeNode=true + helm install shardingsphere-operator apache-shardingsphere-operator-charts -n shardingsphere-operator --set replicaCount=1 --set operator.featureGates.computeNode=true --set operator.image.tag=0.2.0 sleep 60 kubectl wait --timeout=120s --for=condition=Ready --all pod -n shardingsphere-operator kubectl get pod -n shardingsphere-operator --show-labels diff --git a/charts/apache-shardingsphere-operator-charts/Chart.yaml b/charts/apache-shardingsphere-operator-charts/Chart.yaml index 007636ad..7408e757 100644 --- a/charts/apache-shardingsphere-operator-charts/Chart.yaml +++ b/charts/apache-shardingsphere-operator-charts/Chart.yaml @@ -29,5 +29,5 @@ dependencies: - bitnami-common version: 1.16.1 type: application -version: 0.2.0 -appVersion: "5.3.1" +version: 0.3.0 +appVersion: "0.3.0" diff --git a/charts/apache-shardingsphere-operator-charts/README.md b/charts/apache-shardingsphere-operator-charts/README.md index 11df9a8c..f93cf780 100644 --- a/charts/apache-shardingsphere-operator-charts/README.md +++ b/charts/apache-shardingsphere-operator-charts/README.md @@ -16,113 +16,6 @@ Use the following command to uninstall: helm uninstall [RELEASE_NAME] ``` -## Try ComputeNode -Use the following command to install: -```shell -helm install [RELEASE_NAME] shardingsphere/apache-shardingsphere-operator-charts --set operator.featureGates.computeNode=true --set proxyCluster.enabled=false -``` - -## Parameters -### Common parameters -| Name | Description | Value | |-------------------|-----------------------------------------------------------------------------------------------------------|------------------------| | `nameOverride` | nameOverride String to partially override common.names.fullname template (will maintain the release name) | `shardingsphere-proxy` | - -### ShardingSphere Operator Parameters -| Name | Description | Value | |-----------------------------------| ------------------------------------------- |-------------------------------------------------------------------------| | `operator.replicaCount` | operator replica count | `2` | | `operator.image.repository` | operator image name | `apache/shardingsphere-operator` | | `operator.image.pullPolicy` | image pull policy | `IfNotPresent` | | `operator.image.tag` | image tag | `0.2.0` | | `operator.imagePullSecrets` | image pull secret of private repository | `[]` | | `operator.resources` | operator Resources required by the operator | `{}` | | `operator.health.healthProbePort` | operator health check port | `8080` | - -### ShardingSphere ProxyCluster Parameters - | Name | Description | Value | |--------------------------------------------------| -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |-------------| | `proxyCluster.replicaCount` | ShardingSphere-Proxy cluster starts the number of replicas, Note: After you enable automaticScaling, this parameter will no longer take effect | `3` | |
`proxyCluster.proxyVersion` | ShardingSphere-Proxy cluster version | `5.3.1` | -| `proxyCluster.automaticScaling.enable` | ShardingSphere-Proxy Whether the ShardingSphere-Proxy cluster has auto-scaling enabled | `false` | -| `proxyCluster.automaticScaling.scaleUpWindows` | ShardingSphere-Proxy automatically scales the stable window | `30` | -| `proxyCluster.automaticScaling.scaleDownWindows` | ShardingSphere-Proxy automatically shrinks the stabilized window | `30` | -| `proxyCluster.automaticScaling.target` | ShardingSphere-Proxy auto-scaling threshold, the value is a percentage, note: at this stage, only cpu is supported as a metric for scaling | `20` | -| `proxyCluster.automaticScaling.maxInstance` | ShardingSphere-Proxy maximum number of scaled-out replicas | `4` | -| `proxyCluster.automaticScaling.minInstance` | ShardingSphere-Proxy has a minimum number of boot replicas, and the shrinkage will not be less than this number of replicas | `1` | -| `proxyCluster.resources` | ShardingSphere-Proxy starts the requirement resource, and after opening automaticScaling, the resource of the request multiplied by the percentage of target is used to trigger the scaling action | `{}` | -| `proxyCluster.service.type` | ShardingSphere-Proxy external exposure mode | `ClusterIP` | -| `proxyCluster.service.port` | ShardingSphere-Proxy exposes port | `3307` | -| `proxyCluster.startPort` | ShardingSphere-Proxy boot port | `3307` | -| `proxyCluster.mySQLDriver.version` | ShardingSphere-Proxy The ShardingSphere-Proxy mysql driver version will not be downloaded if it is empty | `5.1.47` | - - -### ShardingSphere ProxyCluster ServerConfiguration Authority Parameters - -| Name | Description | Value | -|---------------------------------------------------------| ---------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- | -| `proxyCluster.serverConfig.authority.privilege.type` | authority provider for storage node, the default value is ALL_PERMITTED | `ALL_PRIVILEGES_PERMITTED` | -| `proxyCluster.serverConfig.authority.users[0].password` | Password for compute node. | `root` | -| `proxyCluster.serverConfig.authority.users[0].user` | Username,authorized host for compute node. Format: @ hostname is % or empty string means do not care about authorized host | `root@%` | - - -### ShardingSphere ProxyCluster ServerConfiguration Mode Parameters - -| Name | Description | Value | -|--------------------------------------------------------------------------------| ------------------------------------------------------------------- | ---------------------------------------------------------------------- | -| `proxyCluster.serverConfig.mode.type` | Type of mode configuration. 
Now only support Cluster mode | `Cluster` | -| `proxyCluster.serverConfig.mode.repository.props.namespace` | Namespace of registry center | `governance_ds` | -| `proxyCluster.serverConfig.mode.repository.props.server-lists` | Server lists of registry center | `{{ printf "%s-zookeeper.%s:2181" .Release.Name .Release.Namespace }}` | -| `proxyCluster.serverConfig.mode.repository.props.maxRetries` | Max retries of client connection | `3` | -| `proxyCluster.serverConfig.mode.repository.props.operationTimeoutMilliseconds` | Milliseconds of operation timeout | `5000` | -| `proxyCluster.serverConfig.mode.repository.props.retryIntervalMilliseconds` | Milliseconds of retry interval | `500` | -| `proxyCluster.serverConfig.mode.repository.props.timeToLiveSeconds` | Seconds of ephemeral data live | `600` | -| `proxyCluster.serverConfig.mode.repository.type` | Type of persist repository. Now only support ZooKeeper | `ZooKeeper` | -| `proxyCluster.serverConfig.mode.overwrite` | Whether overwrite persistent configuration with local configuration | `true` | -| `proxyCluster.serverConfig.props.proxy-frontend-database-protocol-type` | Default startup protocol | `MySQL` | - - -### ZooKeeper Parameters - -| Name | Description | Value | -| ------------------------------------ | ---------------------------------------------------- | ------------------- | -| `zookeeper.enabled` | Switch to enable or disable the ZooKeeper helm chart | `true` | -| `zookeeper.replicaCount` | Number of ZooKeeper nodes | `1` | -| `zookeeper.persistence.enabled` | Enable persistence on ZooKeeper using PVC(s) | `false` | -| `zookeeper.persistence.storageClass` | Persistent Volume storage class | `""` | -| `zookeeper.persistence.accessModes` | Persistent Volume access modes | `["ReadWriteOnce"]` | -| `zookeeper.persistence.size` | Persistent Volume size | `8Gi` | - - -### ShardingSphere ComputeNode Parameters - -| Name | Description | Value | -| --------------------------------------------| ------------------------------------------------------------------------------------------------------ | ------------------- | -| `computeNode.storageNodeConnector.type` | ShardingSphere-Proxy driver type | `mysql` | -| `computeNode.storageNodeConnector.version` | ShardingSphere-Proxy driver version. The MySQL driver need to be downloaded according to this version | `5.1.47` | -| `computeNode.serverVersion` | ShardingSphere-Proxy cluster version | `5.3.1` | -| `computeNode.portBindings[0].name` | ShardingSphere-Proxy port name | `3307` | -| `computeNode.portBindings[0].containerPort` | ShardingSphere-Proxy port for container | `3307` | -| `computeNode.portBindings[0].servicePort` | ShardingSphere-Proxy port for service | `3307` | -| `computeNode.portBindings[0].procotol` | ShardingSphere-Proxy port protocol | `TCP` | -| `computeNode.serviceType` | ShardingSphere-Proxy service type | `ClusterIP` | - - -### ShardingSphere ComputeNode Bootstrap Parameters +## Manual -| Name | Description | Value | -|--------------------------------------------------------------------------------| ------------------------------------------------------------------- | ---------------------------------------------------------------------- | -| `computeNode.bootstrap.serverConfig.authority.privilege.type` | authority provider for storage node, the default value is ALL_PERMITTED | `ALL_PRIVILEGES_PERMITTED` | -| `computeNode.bootstrap.serverConfig.authority.users[0].user` | Username,authorized host for compute node. 
Format: @ hostname is % or empty string means do not care about authorized host | `root@%` | -| `computeNode.bootstrap.serverConfig.authority.users[0].password` | Password for compute node. | `root` | -| `computeNode.bootstrap.serverConfig.mode.type` | Type of mode configuration. Now only support Cluster mode | `Cluster` | -| `computeNode.bootstrap.serverConfig.mode.repository.type` | Type of persist repository. Now only support ZooKeeper | `ZooKeeper` | -| `computeNode.bootstrap.mode.repository.props.timeToLiveSeconds` | Seconds of ephemeral data live | `600` | -| `computeNode.bootstrap.serverConfig.mode.repository.props.serverlists` | Server lists of registry center | `{{ printf "%s-zookeeper.%s:2181" .Release.Name .Release.Namespace }}` | -| `computeNode.bootstrap.serverConfig.mode.repository.props.retryIntervalMilliseconds` | Milliseconds of retry interval | `500` | -| `computeNode.bootstrap.serverConfig.mode.repository.props.operationTimeoutMilliseconds` | Milliseconds of operation timeout | `5000` | -| `computeNode.bootstrap.serverConfig.mode.repository.props.namespace` | Namespace of registry center | `governance_ds` | -| `computeNode.bootstrap.serverConfig.mode.repository.props.maxRetries` | Max retries of client connection | `3` | -| `computeNode.bootstrap.serverConfig.mode.overwrite` | Whether overwrite persistent configuration with local configuration | `true` | -| `computeNode.bootstrap.serverConfig.props.proxy-frontend-database-protocol-type` | Default startup protocol | `MySQL` | +For further instructions, please check out the [Apache ShardingSphere on Cloud official documentation](https://shardingsphere.apache.org/oncloud/current/en/overview/). diff --git a/charts/apache-shardingsphere-operator-charts/templates/operator_deployment.yaml b/charts/apache-shardingsphere-operator-charts/templates/operator_deployment.yaml index f4d4f969..3b58348f 100644 --- a/charts/apache-shardingsphere-operator-charts/templates/operator_deployment.yaml +++ b/charts/apache-shardingsphere-operator-charts/templates/operator_deployment.yaml @@ -15,11 +15,6 @@ # limitations under the License. # -# Check Zookeeper settings -# if enabled zookeeper's persistence, should set correct storageClass for it -{{- if .Values.zookeeper.persistence.enabled }} -{{- required "Detected .Values.zookeeper.persistence.enabled is true, please set a correct .Values.zookeeper.persistence.storageClass !"
.Values.zookeeper.persistence.storageClass -}} -{{- end }} --- apiVersion: apps/v1 kind: Deployment @@ -44,7 +39,7 @@ spec: - --health-probe-bind-address=:{{ .Values.operator.health.healthProbePort }} - --leader-elect {{- if eq .Values.operator.featureGates.computeNode true }} - - --feature-gates=ComputeNode=true{{- if eq .Values.operator.featureGates.storageNode true }},StorageNode=true{{- end }} + - --feature-gates=ComputeNode=true{{- if eq .Values.operator.featureGates.storageNode true }},StorageNode=true{{- end }}{{- if eq .Values.operator.featureGates.chaos true }},Chaos=true {{- end }} {{- end }} {{- if eq .Values.operator.storageNodeProviders.aws.enabled true }} - --aws-region={{ .Values.operator.storageNodeProviders.aws.region }} diff --git a/charts/apache-shardingsphere-operator-charts/templates/operator_rbac.yaml b/charts/apache-shardingsphere-operator-charts/templates/operator_rbac.yaml index ec41a8c6..3e4b5a76 100644 --- a/charts/apache-shardingsphere-operator-charts/templates/operator_rbac.yaml +++ b/charts/apache-shardingsphere-operator-charts/templates/operator_rbac.yaml @@ -77,254 +77,302 @@ kind: ClusterRole metadata: name: {{ template "operator.name" . }} rules: - - apiGroups: - - "" - resources: - - configmaps - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - "" - resources: - - event - verbs: - - create - - patch - - apiGroups: - - apps - resources: - - deployments - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - apps - resources: - - deployments/status - verbs: - - get - - apiGroups: - - autoscaling - resources: - - horizontalpodautoscalers - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - autoscaling - resources: - - horizontalpodautoscalers/status - verbs: - - get - - apiGroups: - - "" - resources: - - configmaps - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - "" - resources: - - pods - verbs: - - create - - delete - - deletecollection - - get - - list - - patch - - update - - watch - - apiGroups: - - "" - resources: - - pods/status - verbs: - - get - - apiGroups: - - "" - resources: - - services - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - "" - resources: - - services/status - verbs: - - get - - apiGroups: - - postgresql.cnpg.io - resources: - - clusters - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - shardingsphere.apache.org - resources: - - chaos - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - shardingsphere.apache.org - resources: - - chaos/finalizers - verbs: - - update - - apiGroups: - - shardingsphere.apache.org - resources: - - chaos/status - verbs: - - get - - patch - - update - - apiGroups: - - shardingsphere.apache.org - resources: - - computenodes - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - shardingsphere.apache.org - resources: - - computenodes/status - verbs: - - get - - patch - - update - - apiGroups: - - shardingsphere.apache.org - resources: - - shardingsphereproxies - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - shardingsphere.apache.org - resources: - - shardingsphereproxies/finalizers - verbs: - - update - - apiGroups: - - shardingsphere.apache.org - resources: - - shardingsphereproxies/status - verbs: 
- - get - - patch - - update - - apiGroups: - - shardingsphere.apache.org - resources: - - shardingsphereproxyserverconfigs - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - shardingsphere.apache.org - resources: - - shardingsphereproxyserverconfigs/finalizers - verbs: - - update - - apiGroups: - - shardingsphere.apache.org - resources: - - shardingsphereproxyserverconfigs/status - verbs: - - get - - patch - - update - - apiGroups: - - shardingsphere.apache.org - resources: - - storagenodes - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - shardingsphere.apache.org - resources: - - storagenodes/finalizers - verbs: - - update - - apiGroups: - - shardingsphere.apache.org - resources: - - storagenodes/status - verbs: - - get - - patch - - update - - apiGroups: - - shardingsphere.apache.org - resources: - - storageproviders - verbs: - - get - - list - - watch +- apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - event + verbs: + - create + - patch +- apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - apps + resources: + - deployments/status + verbs: + - get +- apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers/status + verbs: + - get +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - chaos-mesh.org + resources: + - networkchaos + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - chaos-mesh.org + resources: + - podchaos + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - chaos-mesh.org + resources: + - stresschaos + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - pods + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - pods/status + verbs: + - get +- apiGroups: + - "" + resources: + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - services/status + verbs: + - get +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - shardingsphere.apache.org + resources: + - chaos + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - shardingsphere.apache.org + resources: + - chaos/finalizers + verbs: + - update +- apiGroups: + - shardingsphere.apache.org + resources: + - chaos/status + verbs: + - get + - patch + - update +- apiGroups: + - shardingsphere.apache.org + resources: + - computenodes + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - shardingsphere.apache.org + resources: + - computenodes/status + verbs: + - get + - patch + - update +- apiGroups: + - shardingsphere.apache.org + resources: + - shardingsphereproxies 
+ verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - shardingsphere.apache.org + resources: + - shardingsphereproxies/finalizers + verbs: + - update +- apiGroups: + - shardingsphere.apache.org + resources: + - shardingsphereproxies/status + verbs: + - get + - patch + - update +- apiGroups: + - shardingsphere.apache.org + resources: + - shardingsphereproxyserverconfigs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - shardingsphere.apache.org + resources: + - shardingsphereproxyserverconfigs/finalizers + verbs: + - update +- apiGroups: + - shardingsphere.apache.org + resources: + - shardingsphereproxyserverconfigs/status + verbs: + - get + - patch + - update +- apiGroups: + - shardingsphere.apache.org + resources: + - storagenodes + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - shardingsphere.apache.org + resources: + - storagenodes/finalizers + verbs: + - update +- apiGroups: + - shardingsphere.apache.org + resources: + - storagenodes/status + verbs: + - get + - patch + - update +- apiGroups: + - shardingsphere.apache.org + resources: + - storageproviders + verbs: + - get + - list + - watch --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding diff --git a/charts/apache-shardingsphere-operator-charts/values.yaml b/charts/apache-shardingsphere-operator-charts/values.yaml index 3cbccdcd..c01a3785 100644 --- a/charts/apache-shardingsphere-operator-charts/values.yaml +++ b/charts/apache-shardingsphere-operator-charts/values.yaml @@ -34,7 +34,7 @@ operator: pullPolicy: IfNotPresent ## @param image.tag image tag ## - tag: "0.2.0" + tag: "0.3.0" ## @param imagePullSecrets image pull secret of private repository ## e.g: ## imagePullSecrets: @@ -64,6 +64,7 @@ operator: featureGates: computeNode: false storageNode: false + chaos: false storageNodeProviders: aws: @@ -76,235 +77,3 @@ operator: region: "" accessKeyId: "" secretAccessKey: "" - -## @section ShardingSphere-Proxy cluster parameters -proxyCluster: - enabled: true - ## @param replicaCount ShardingSphere-Proxy cluster starts the number of replicas, Note: After you enable automaticScaling, this parameter will no longer take effect - ## @param proxyVersion ShardingSphere-Proxy cluster version - ## - replicaCount: "3" - proxyVersion: "5.3.1" - ## @param automaticScaling.enable ShardingSphere-Proxy Whether the ShardingSphere-Proxy cluster has auto-scaling enabled - ## @param automaticScaling.scaleUpWindows ShardingSphere-Proxy automatically scales the stable window - ## @param automaticScaling.scaleDownWindows ShardingSphere-Proxy automatically shrinks the stabilized window - ## @param automaticScaling.target ShardingSphere-Proxy auto-scaling threshold, the value is a percentage, note: at this stage, only cpu is supported as a metric for scaling - ## @param automaticScaling.maxInstance ShardingSphere-Proxy maximum number of scaled-out replicas - ## @param automaticScaling.minInstance ShardingSphere-Proxy has a minimum number of boot replicas, and the shrinkage will not be less than this number of replicas - ## - automaticScaling: - enable: false - scaleUpWindows: 30 - scaleDownWindows: 30 - target: 20 - maxInstance: 4 - minInstance: 1 - ## @param resources ShardingSphere-Proxy starts the requirement resource, and after opening automaticScaling, the resource of the request multiplied by the percentage of target is used to trigger the scaling action - ## e.g: - ## resources: - ## limits: - ## cpu: 2 - 
## memory: 2Gi - ## requests: - ## cpu: 2 - ## memory: 2Gi - ## - resources: { } - ## @param service.type ShardingSphere-Proxy external exposure mode - ## @param service.port ShardingSphere-Proxy exposes port - ## - service: - type: ClusterIP - port: 3307 - ## @param startPort ShardingSphere-Proxy boot port - ## - startPort: 3307 - ## @param mySQLDriver.version ShardingSphere-Proxy The ShardingSphere-Proxy mysql driver version will not be downloaded if it is empty - ## - mySQLDriver: - version: "5.1.47" - ## @param imagePullSecrets ShardingSphere-Proxy pull private image repository key - ## e.g: - ## imagePullSecrets: - ## - name: mysecret - ## - imagePullSecrets: [ ] - ## @section ShardingSphere-Proxy ServerConfiguration parameters - ## NOTE: If you use the sub-charts to deploy Zookeeper, the server-lists field must be "{{ printf \"%s-zookeeper.%s:2181\" .Release.Name .Release.Namespace }}", - ## otherwise please fill in the correct zookeeper address - ## The server.yaml is auto-generated based on this parameter. - ## If it is empty, the server.yaml is also empty. - ## ref: https://shardingsphere.apache.org/document/current/en/user-manual/shardingsphere-jdbc/yaml-config/mode/ - ## ref: https://shardingsphere.apache.org/document/current/en/user-manual/shardingsphere-jdbc/builtin-algorithm/metadata-repository/ - ## - serverConfig: - ## @section Compute-Node ShardingSphere-Proxy ServerConfiguration authority parameters - ## NOTE: It is used to set up initial user to login compute node, and authority data of storage node. - ## @param serverConfig.authority.privilege.type authority provider for storage node, the default value is ALL_PERMITTED - ## @param serverConfig.authority.users[0].password Password for compute node. - ## @param serverConfig.authority.users[0].user Username,authorized host for compute node. Format: @ hostname is % or empty string means do not care about authorized host - ## - authority: - privilege: - type: ALL_PERMITTED - users: - - password: root - user: root@% - ## @section Compute-Node ShardingSphere-Proxy ServerConfiguration mode Configuration parameters - ## @param serverConfig.mode.type Type of mode configuration. Now only support Cluster mode - ## @param serverConfig.mode.repository.props.namespace Namespace of registry center - ## @param serverConfig.mode.repository.props.server-lists Server lists of registry center - ## @param serverConfig.mode.repository.props.maxRetries Max retries of client connection - ## @param serverConfig.mode.repository.props.operationTimeoutMilliseconds Milliseconds of operation timeout - ## @param serverConfig.mode.repository.props.retryIntervalMilliseconds Milliseconds of retry interval - ## @param serverConfig.mode.repository.props.timeToLiveSeconds Seconds of ephemeral data live - ## @param serverConfig.mode.repository.type Type of persist repository. 
Now only support ZooKeeper - ## @param serverConfig.props.proxy-frontend-database-protocol-type Default startup protocol - mode: - repository: - props: - maxRetries: 3 - namespace: governance_ds - operationTimeoutMilliseconds: 5000 - retryIntervalMilliseconds: 500 - server-lists: "{{ printf \"%s-zookeeper.%s:2181\" .Release.Name .Release.Namespace }}" - timeToLiveSeconds: 600 - type: ZooKeeper - type: Cluster - props: - proxy-frontend-database-protocol-type: MySQL - ## @section ZooKeeper chart parameters - -## ZooKeeper chart configuration -## https://github.com/bitnami/charts/blob/master/bitnami/zookeeper/values.yaml -## -zookeeper: - ## @param zookeeper.enabled Switch to enable or disable the ZooKeeper helm chart - ## - enabled: true - ## @param zookeeper.replicaCount Number of ZooKeeper nodes - ## - replicaCount: 3 - ## ZooKeeper Persistence parameters - ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ - ## @param zookeeper.persistence.enabled Enable persistence on ZooKeeper using PVC(s) - ## @param zookeeper.persistence.storageClass Persistent Volume storage class - ## @param zookeeper.persistence.accessModes Persistent Volume access modes - ## @param zookeeper.persistence.size Persistent Volume size - ## - persistence: - enabled: false - storageClass: "" - accessModes: - - ReadWriteOnce - size: 8Gi - -## @section ComputeNode parameters -computeNode: - ## @param computeNode.storageNodeConnector declare the driver for ShardingSphere Proxy - ## @param computeNode.storageNodeConnector.type is the driver type for ShardingSphere Proxy - ## @param computeNode.storageNodeConnector.version is the driver version for ShardingSphere Proxy - ## - storageNodeConnector: - type: mysql - version: 5.1.47 - ## @param computeNode.serverVersion the vesrion for ShardingSphere Proxy - ## - serverVersion: 5.3.1 - ## @param computeNode.replicas the replicas for ShardingSphere Proxy - ## - replicas: 1 - ## @param computeNode.portBindings the port binding - ## - portBindings: - ## @param computeNode.portBindings[0].name the port binding - ## @param computeNode.portBindings[0].containerPort the container port - ## @param computeNode.portBindings[0].servicePort the service port - ## @param computeNode.portBindings[0].protocol the protocol - # - - name: server - containerPort: 3307 - servicePort: 3307 - protocol: TCP - ## @param computeNode.serviceType the service type - ## - serviceType: ClusterIP - ## @param computeNode.bootstrap the configuration for bootstrapping - ## - bootstrap: - ## @param computeNode.bootstrap.serverConfig the server.yaml - ## - serverConfig: - ## @param computeNode.bootstrap.serverConfig.authority the authorization - ## - authority: - ## @param computeNode.bootstrap.serverConfig.authority.privilege authorized privilege - ## - privilege: - ## @param computeNode.bootstrap.serverConfig.authority.privilege.type authorized privilege type - ## - type: ALL_PERMITTED - ## @param computeNode.users authorized users - ## @param computeNode.users[0].user the username - ## @param computeNode.users[0].password the password - ## - users: - - user: root@% - password: root - ## @param computeNode.bootstrap.serverConfig.mode the modes for ShardingSphere - ## - mode: - ## @param computeNode.bootstrap.serverConfig.mode.type the running mode for ShardingSphere - ## - type: Cluster - ## @param computeNode.bootstrap.serverConfig.mode.repository the config for repository - ## - repository: - ## @param computeNode.bootstrap.serverConfig.mode.repository.type the type for repository - ## e.g. 
- ## type: ZooKeeper - ## - type: ZooKeeper - ## @param computeNode.bootstrap.serverConfig.mode.repository.props the modes for ShardingSphere - ## e.g. - ## timeToLiveSeconds: "600" - ## server-lists: "{{ printf \"%s-zookeeper.%s:2181\" .Release.Name .Release.Namespace }}" - ## retryIntervalMilliseconds: "500" - ## operationTimeoutMilliseconds: "5000" - ## namespace: governance_ds - ## maxRetries: "3" - ## - props: - timeToLiveSeconds: "600" - serverlists: zookeeper.default:2181 - retryIntervalMilliseconds: "500" - operationTimeoutMilliseconds: "5000" - namespace: governance_ds - maxRetries: "3" - ## @param computeNode.bootstrap.props the props for ShardingSphere - ## e.g. - ## proxy-frontend-database-protocol-type: MySQL - ## - props: - proxyFrontendDatabaseProtocolType: MySQL - - ## @param computeNode.bootstrap.agentConfig the agent.yaml - ## - agentConfig: - ## @param computeNode.bootstrap.agentConfig.plugins the plugins for agent - ## - plugins: - ## metric plugin - metrics: - ## prometheus settings - prometheus: - ## host for prometheus - ## e.g. - ## host: "localhost" - host: "localhost" - ## port for prometheus - ## e.g. - ## port: 9090 - port: 9090 - props: - jvmInformationCollectorEnabled: "true" diff --git a/shardingsphere-operator/pkg/controllers/chaos_controller.go b/shardingsphere-operator/pkg/controllers/chaos_controller.go index 0b607f54..a752a339 100644 --- a/shardingsphere-operator/pkg/controllers/chaos_controller.go +++ b/shardingsphere-operator/pkg/controllers/chaos_controller.go @@ -65,6 +65,10 @@ type ChaosReconciler struct { // +kubebuilder:rbac:groups=shardingsphere.apache.org,resources=chaos,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=shardingsphere.apache.org,resources=chaos/status,verbs=get;update;patch // +kubebuilder:rbac:groups=shardingsphere.apache.org,resources=chaos/finalizers,verbs=update +// +kubebuilder:rbac:groups=chaos-mesh.org,resources=podchaos,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=chaos-mesh.org,resources=stresschaos,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=chaos-mesh.org,resources=networkchaos,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;update;patch;delete // Reconcile handles main function of this controller func (r *ChaosReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {