This is an automated email from the ASF dual-hosted git repository.
houston pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/solr-operator.git
The following commit(s) were added to refs/heads/main by this push:
new 8381105 Add a podDisruptionBudget for the whole cloud (#473)
8381105 is described below
commit 8381105fa129cd0e42b383a899900a07387a0e65
Author: Houston Putman <[email protected]>
AuthorDate: Tue Dec 13 09:56:18 2022 -0600
Add a podDisruptionBudget for the whole cloud (#473)
This increases the minimum supported Kubernetes version to v1.21
---
config/rbac/role.yaml | 12 +++++
controllers/controller_utils_test.go | 41 ++++++++++++--
controllers/solrcloud_controller.go | 32 ++++++++++-
controllers/solrcloud_controller_test.go | 13 ++++-
controllers/util/common.go | 25 +++++++++
controllers/util/solr_pod_disruption.go | 91 ++++++++++++++++++++++++++++++++
docs/solr-cloud/solr-cloud-crd.md | 12 +++++
docs/upgrade-notes.md | 15 +++++-
helm/solr-operator/Chart.yaml | 16 +++++-
helm/solr-operator/templates/role.yaml | 12 +++++
helm/solr/Chart.yaml | 7 ++-
11 files changed, 267 insertions(+), 9 deletions(-)
diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml
index 8aa1552..2132bc0 100644
--- a/config/rbac/role.yaml
+++ b/config/rbac/role.yaml
@@ -151,6 +151,18 @@ rules:
- ingresses/status
verbs:
- get
+- apiGroups:
+ - policy
+ resources:
+ - poddisruptionbudgets
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
- apiGroups:
- solr.apache.org
resources:
diff --git a/controllers/controller_utils_test.go b/controllers/controller_utils_test.go
index ce29048..053c3f1 100644
--- a/controllers/controller_utils_test.go
+++ b/controllers/controller_utils_test.go
@@ -20,6 +20,7 @@ package controllers
import (
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
+ policyv1 "k8s.io/api/policy/v1"
"regexp"
solrv1beta1 "github.com/apache/solr-operator/api/v1beta1"
@@ -327,6 +328,40 @@ func expectNoIngress(ctx context.Context, parentResource client.Object, ingressN
	}).Should(MatchError("ingresses.networking.k8s.io \""+ingressName+"\" not found"), "Ingress exists when it should not")
}
+func expectPodDisruptionBudget(ctx context.Context, parentResource client.Object, podDisruptionBudgetName string, selector *metav1.LabelSelector, maxUnavailable intstr.IntOrString, additionalOffset ...int) *policyv1.PodDisruptionBudget {
+ return expectPodDisruptionBudgetWithChecks(ctx, parentResource, podDisruptionBudgetName, selector, maxUnavailable, nil, resolveOffset(additionalOffset))
+}
+
+func expectPodDisruptionBudgetWithChecks(ctx context.Context, parentResource client.Object, podDisruptionBudgetName string, selector *metav1.LabelSelector, maxUnavailable intstr.IntOrString, additionalChecks func(Gomega, *policyv1.PodDisruptionBudget), additionalOffset ...int) *policyv1.PodDisruptionBudget {
+ podDisruptionBudget := &policyv1.PodDisruptionBudget{}
+ EventuallyWithOffset(resolveOffset(additionalOffset), func(g Gomega) {
+ g.Expect(k8sClient.Get(ctx, resourceKey(parentResource, podDisruptionBudgetName), podDisruptionBudget)).To(Succeed(), "Expected PodDisruptionBudget does not exist")
+
+ // Verify the PodDisruptionBudget Spec
g.Expect(podDisruptionBudget.Spec.Selector).To(Equal(selector), "PodDisruptionBudget does not have the correct selector.")
+ g.Expect(podDisruptionBudget.Spec.MaxUnavailable).To(Equal(&maxUnavailable), "PodDisruptionBudget does not have the correct maxUnavailable setting.")
+
+ if additionalChecks != nil {
+ additionalChecks(g, podDisruptionBudget)
+ }
+ }).Should(Succeed())
+
+ By("recreating the PodDisruptionBudget after it is deleted")
+ ExpectWithOffset(resolveOffset(additionalOffset), k8sClient.Delete(ctx, podDisruptionBudget)).To(Succeed())
+ EventuallyWithOffset(
+ resolveOffset(additionalOffset),
+ func() (types.UID, error) {
+ newResource := &policyv1.PodDisruptionBudget{}
+ err := k8sClient.Get(ctx, resourceKey(parentResource, podDisruptionBudgetName), newResource)
+ if err != nil {
+ return "", err
+ }
+ return newResource.UID, nil
+ }).Should(And(Not(BeEmpty()), Not(Equal(podDisruptionBudget.UID))), "New PodDisruptionBudget, with new UID, not created.")
+
+ return podDisruptionBudget
+}
+
func expectConfigMap(ctx context.Context, parentResource client.Object, configMapName string, configMapData map[string]string, additionalOffset ...int) *corev1.ConfigMap {
return expectConfigMapWithChecks(ctx, parentResource, configMapName, configMapData, nil, resolveOffset(additionalOffset))
}
@@ -741,9 +776,9 @@ var (
"testS4": "valueS4",
}
testNodeSelectors = map[string]string{
- "beta.kubernetes.io/arch": "amd64",
- "beta.kubernetes.io/os": "linux",
- "solrclouds": "true",
+ "kubernetes.io/arch": "amd64",
+ "kubernetes.io/os": "linux",
+ "solrclouds": "true",
}
testProbeLivenessNonDefaults = &corev1.Probe{
InitialDelaySeconds: 20,
diff --git a/controllers/solrcloud_controller.go b/controllers/solrcloud_controller.go
index c10a8de..9c9aacc 100644
--- a/controllers/solrcloud_controller.go
+++ b/controllers/solrcloud_controller.go
@@ -21,6 +21,7 @@ import (
"context"
"crypto/md5"
"fmt"
+ policyv1 "k8s.io/api/policy/v1"
"k8s.io/apimachinery/pkg/runtime"
"reflect"
"sort"
@@ -73,6 +74,7 @@ func UseZkCRD(useCRD bool) {
//+kubebuilder:rbac:groups="",resources=configmaps,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups="",resources=configmaps/status,verbs=get
//+kubebuilder:rbac:groups="",resources=persistentvolumeclaims,verbs=get;list;watch;delete
+//+kubebuilder:rbac:groups=policy,resources=poddisruptionbudgets,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=zookeeper.pravega.io,resources=zookeeperclusters,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=zookeeper.pravega.io,resources=zookeeperclusters/status,verbs=get
//+kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;patch;delete
@@ -457,6 +459,33 @@ func (r *SolrCloudReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
}
}
+ // PodDisruptionBudget(s)
+ pdb := util.GeneratePodDisruptionBudget(instance, pvcLabelSelector)
+
+ // Check if the PodDisruptionBudget already exists
+ pdbLogger := logger.WithValues("podDisruptionBudget", pdb.Name)
+ foundPDB := &policyv1.PodDisruptionBudget{}
+ err = r.Get(ctx, types.NamespacedName{Name: pdb.Name, Namespace: pdb.Namespace}, foundPDB)
+ if err != nil && errors.IsNotFound(err) {
+ pdbLogger.Info("Creating PodDisruptionBudget")
+ if err = controllerutil.SetControllerReference(instance, pdb, r.Scheme); err == nil {
+ err = r.Create(ctx, pdb)
+ }
+ } else if err == nil {
+ var needsUpdate bool
+ needsUpdate, err = util.OvertakeControllerRef(instance, foundPDB, r.Scheme)
+ needsUpdate = util.CopyPodDisruptionBudgetFields(pdb, foundPDB, pdbLogger) || needsUpdate
+
+ // Update the found PodDisruptionBudget and write the result back if there are any changes
+ if needsUpdate && err == nil {
+ pdbLogger.Info("Updating PodDisruptionBudget")
+ err = r.Update(ctx, foundPDB)
+ }
+ }
+ if err != nil {
+ return requeueOrNot, err
+ }
+
extAddressabilityOpts := instance.Spec.SolrAddressability.External
if extAddressabilityOpts != nil && extAddressabilityOpts.Method == solrv1beta1.Ingress {
// Generate Ingress
@@ -893,7 +922,8 @@ func (r *SolrCloudReconciler) SetupWithManager(mgr ctrl.Manager) error {
Owns(&appsv1.StatefulSet{}).
Owns(&corev1.Service{}).
Owns(&corev1.Secret{}). /* for authentication */
- Owns(&netv1.Ingress{})
+ Owns(&netv1.Ingress{}).
+ Owns(&policyv1.PodDisruptionBudget{})
var err error
ctrlBuilder, err = r.indexAndWatchForProvidedConfigMaps(mgr, ctrlBuilder)
diff --git a/controllers/solrcloud_controller_test.go b/controllers/solrcloud_controller_test.go
index 1d5903f..3f05089 100644
--- a/controllers/solrcloud_controller_test.go
+++ b/controllers/solrcloud_controller_test.go
@@ -28,6 +28,7 @@ import (
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/intstr"
"strconv"
"strings"
)
@@ -146,10 +147,14 @@ var _ = FDescribe("SolrCloud controller - General", func() {
By("making sure no Ingress was created")
expectNoIngress(ctx, solrCloud, solrCloud.CommonIngressName())
+
+ By("testing the PodDisruptionBudget")
+ expectPodDisruptionBudget(ctx, solrCloud, solrCloud.StatefulSetName(), statefulSet.Spec.Selector, intstr.FromString(util.DefaultMaxPodsUnavailable))
})
})
FContext("Solr Cloud with Custom Kube Options", func() {
+ three := intstr.FromInt(3)
BeforeEach(func() {
replicas := int32(4)
solrCloud.Spec = solrv1beta1.SolrCloudSpec{
@@ -164,7 +169,10 @@ var _ = FDescribe("SolrCloud controller - General", func() {
},
},
UpdateStrategy: solrv1beta1.SolrUpdateStrategy{
- Method: solrv1beta1.StatefulSetUpdate,
+ Method: solrv1beta1.StatefulSetUpdate,
+ ManagedUpdateOptions: solrv1beta1.ManagedUpdateOptions{
+ MaxPodsUnavailable: &three,
+ },
RestartSchedule: "@every 30m",
},
SolrGCTune: "gc Options",
@@ -280,6 +288,9 @@ var _ = FDescribe("SolrCloud controller - General", func() {
Expect(headlessService.Spec.Ports[0].Protocol).To(Equal(corev1.ProtocolTCP), "Wrong protocol on headless Service")
Expect(headlessService.Spec.Ports[0].AppProtocol).ToNot(BeNil(), "AppProtocol on headless Service should not be nil")
Expect(*headlessService.Spec.Ports[0].AppProtocol).To(Equal("http"), "Wrong appProtocol on headless Service")
+
+ By("testing the PodDisruptionBudget")
+ expectPodDisruptionBudget(ctx, solrCloud, solrCloud.StatefulSetName(), statefulSet.Spec.Selector, three)
})
})
diff --git a/controllers/util/common.go b/controllers/util/common.go
index d58e0a1..b887f6d 100644
--- a/controllers/util/common.go
+++ b/controllers/util/common.go
@@ -18,6 +18,7 @@
package util
import (
+ policyv1 "k8s.io/api/policy/v1"
"reflect"
"strconv"
"strings"
@@ -668,6 +669,30 @@ func CopyContainerResourceList(fromPtr, toPtr *corev1.ResourceList, basePath str
return requireUpdate
}
+// CopyPodDisruptionBudgetFields copies the owned fields from one PodDisruptionBudget to another
+func CopyPodDisruptionBudgetFields(from, to *policyv1.PodDisruptionBudget, logger logr.Logger) bool {
+ logger = logger.WithValues("kind", "PodDisruptionBudget")
+ requireUpdate := CopyLabelsAndAnnotations(&from.ObjectMeta, &to.ObjectMeta, logger)
+
+ if !DeepEqualWithNils(to.Spec.MinAvailable, from.Spec.MinAvailable) {
+ requireUpdate = true
+ logger.Info("Update required because field changed", "field",
"Spec.MinAvailable", "from", to.Spec.MinAvailable, "to", from.Spec.MinAvailable)
+ to.Spec.MinAvailable = from.Spec.MinAvailable
+ }
+ if !DeepEqualWithNils(to.Spec.MaxUnavailable, from.Spec.MaxUnavailable) {
+ requireUpdate = true
+ logger.Info("Update required because field changed", "field",
"Spec.MaxUnavailable", "from", to.Spec.MaxUnavailable, "to",
from.Spec.MaxUnavailable)
+ to.Spec.MaxUnavailable = from.Spec.MaxUnavailable
+ }
+ if !DeepEqualWithNils(to.Spec.Selector, from.Spec.Selector) {
+ requireUpdate = true
+ logger.Info("Update required because field changed", "field",
"Spec.Selector", "from", to.Spec.Selector, "to", from.Spec.Selector)
+ to.Spec.Selector = from.Spec.Selector
+ }
+
+ return requireUpdate
+}
+
// OvertakeControllerRef makes sure that the controlled object has the owner as the controller ref.
// If the object has a different controller, then that ref will be downgraded to an "owner" and the new controller ref will be added
func OvertakeControllerRef(owner metav1.Object, controlled metav1.Object, scheme *runtime.Scheme) (needsUpdate bool, err error) {
diff --git a/controllers/util/solr_pod_disruption.go b/controllers/util/solr_pod_disruption.go
new file mode 100644
index 0000000..037ac7b
--- /dev/null
+++ b/controllers/util/solr_pod_disruption.go
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package util
+
+import (
+ solr "github.com/apache/solr-operator/api/v1beta1"
+ policyv1 "k8s.io/api/policy/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+func GeneratePodDisruptionBudget(cloud *solr.SolrCloud, selector map[string]string) *policyv1.PodDisruptionBudget {
+ // For this PDB, we can use an intOrString maxUnavailable (whatever the user provides),
+ // because we are matching the labelSelector used by the statefulSet.
+ var maxUnavailable intstr.IntOrString
+ if cloud.Spec.UpdateStrategy.ManagedUpdateOptions.MaxPodsUnavailable != nil {
+ maxUnavailable = *cloud.Spec.UpdateStrategy.ManagedUpdateOptions.MaxPodsUnavailable
+ } else {
+ maxUnavailable = intstr.FromString(DefaultMaxPodsUnavailable)
+ }
+ return &policyv1.PodDisruptionBudget{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: cloud.StatefulSetName(),
+ Namespace: cloud.Namespace,
+ },
+ Spec: policyv1.PodDisruptionBudgetSpec{
+ Selector: &metav1.LabelSelector{
+ MatchLabels: selector,
+ },
+ MaxUnavailable: &maxUnavailable,
+ },
+ }
+}
+
+/*
+We cannot actually use the shard topology for PDBs, because Kubernetes does not currently support a pod
+mapping to multiple PDBs. Since a Solr pod is sure to host replicas of multiple shards, we would
+have to create multiple PDBs that cover a single pod. Therefore we can only use the Cloud PDB defined in the method above.
+
+Whenever we can use this approach, we need to label the generated PDBs so that a list of all PDBs for a cloud can be found easily.
+That way, when we have the list of PDBs to create/update, we will also know the list of PDBs that need to be deleted.
+
+Kubernetes Documentation: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#arbitrary-controllers-and-selectors
+*/
+func createPodDisruptionBudgetForShard(cloud *solr.SolrCloud, collection string, shard string, nodes []string) policyv1.PodDisruptionBudget {
+ maxUnavailable, err := intstr.GetScaledValueFromIntOrPercent(
+ intstr.ValueOrDefault(cloud.Spec.UpdateStrategy.ManagedUpdateOptions.MaxShardReplicasUnavailable, intstr.FromInt(DefaultMaxShardReplicasUnavailable)),
+ len(nodes),
+ false)
+ if err != nil {
+ maxUnavailable = 1
+ }
+ // From the documentation above, Kubernetes will only accept an int minAvailable for PDBs that use custom pod selectors.
+ // Therefore, we cannot use the maxUnavailable straight from what the user provides.
+ minAvailable := intstr.FromInt(len(nodes) - maxUnavailable)
+ return policyv1.PodDisruptionBudget{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: cloud.Name + "-" + collection + "-" + shard,
+ Namespace: cloud.Namespace,
+ },
+ Spec: policyv1.PodDisruptionBudgetSpec{
+ Selector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
+ {
Key: "statefulset.kubernetes.io/pod-name",
+ Operator: "In",
+ Values: nodes,
+ },
+ },
+ },
+ MinAvailable: &minAvailable,
+ },
+ }
+}
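For context on the comment above: a shard-scoped PDB built by `createPodDisruptionBudgetForShard` would take roughly the following shape. This is an illustrative sketch only; the helper is not wired into the reconciler (only the cloud-wide PDB is created), and the namespace, pod names, and the scaled value of 1 are assumptions for a three-pod example.

```yaml
# Sketch only: the per-shard PDB shape implied by createPodDisruptionBudgetForShard,
# assuming a cloud named "example", collection "books", shard "shard1", three hosting
# pods, and a maxShardReplicasUnavailable that scales to 1 (so minAvailable = 3 - 1 = 2).
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: example-books-shard1
  namespace: default
spec:
  # An arbitrary (non-controller) selector only supports an integer minAvailable,
  # per the Kubernetes documentation linked above.
  minAvailable: 2
  selector:
    matchExpressions:
      - key: statefulset.kubernetes.io/pod-name
        operator: In
        values:                   # assumed StatefulSet pod names
          - example-solrcloud-0
          - example-solrcloud-1
          - example-solrcloud-2
```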
diff --git a/docs/solr-cloud/solr-cloud-crd.md b/docs/solr-cloud/solr-cloud-crd.md
index 9255364..47dbce7 100644
--- a/docs/solr-cloud/solr-cloud-crd.md
+++ b/docs/solr-cloud/solr-cloud-crd.md
@@ -96,6 +96,18 @@ Under `SolrCloud.Spec.updateStrategy`:
- **`maxPodsUnavailable`** - The `maxPodsUnavailable` value is calculated as a percentage of the total pods configured for that Solr Cloud.
- **`maxShardReplicasUnavailable`** - The `maxShardReplicasUnavailable` value is calculated independently for each shard, as a percentage of the number of replicas for that shard.
+### Pod Disruption Budgets
+_Since v0.7.0_
+
+The Solr Operator will create a [`PodDisruptionBudget`](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/#pod-disruption-budgets) to ensure that Kubernetes does not take down more than an acceptable number of SolrCloud nodes at a time.
+The PDB's `maxUnavailable` setting is populated from the `maxPodsUnavailable` setting in `SolrCloud.Spec.updateStrategy.managed`.
+If this option is not set, the default value (`25%`) is used.
+
+Currently, the implementation does not take shard/replica topology into account the way the update strategy does.
+So although Kubernetes might only take down 25% of a cloud's nodes, those nodes could host every replica of a given shard.
+This is ongoing work, and hopefully something the Solr Operator can protect against in the future.
+See [this discussion](https://github.com/apache/solr-operator/issues/471) for more information.
+
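To make the new section above concrete, here is a minimal sketch of a SolrCloud that sets `maxPodsUnavailable`, which flows into the generated PDB's `maxUnavailable`. The resource name and replica count are invented for the example; the field path follows `SolrCloud.Spec.updateStrategy.managed` as described above.

```yaml
# Sketch: the managed-update setting below also drives the cloud-wide PDB's maxUnavailable.
# If maxPodsUnavailable were omitted, the operator would fall back to the default of 25%.
apiVersion: solr.apache.org/v1beta1
kind: SolrCloud
metadata:
  name: example               # invented name for illustration
spec:
  replicas: 4
  updateStrategy:
    managed:
      maxPodsUnavailable: 1   # copied into the PodDisruptionBudget created for this cloud
```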
## Addressability
_Since v0.2.6_
diff --git a/docs/upgrade-notes.md b/docs/upgrade-notes.md
index eb3967b..633a0ae 100644
--- a/docs/upgrade-notes.md
+++ b/docs/upgrade-notes.md
@@ -27,8 +27,8 @@ If you want to skip versions when upgrading, be sure to check out the [upgrading
### Kubernetes Versions
-| Solr Operator Version | `1.15` | `1.16` - `1.18` | `1.19` - `1.21` | `1.22`+ |
-|:---------------------:| :---: | :---: | :---: | :---: |
+| Solr Operator Version | `1.15` | `1.16` - `1.18` | `1.19` - `1.21` | `1.22`+ |
+|:---------------------:| :---: | :---: |:------------------:| :---: |
| `v0.2.6` | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :x: |
| `v0.2.7` | :x: | :heavy_check_mark: | :heavy_check_mark: | :x: |
| `v0.2.8` | :x: | :heavy_check_mark: | :heavy_check_mark: | :x: |
@@ -36,6 +36,7 @@ If you want to skip versions when upgrading, be sure to check out the [upgrading
| `v0.4.x` | :x: | :heavy_check_mark: | :heavy_check_mark: | :x: |
| `v0.5.x` | :x: | :x: | :heavy_check_mark: | :heavy_check_mark: |
| `v0.6.x` | :x: | :x: | :heavy_check_mark: | :heavy_check_mark: |
+| `v0.7.x` | :x: | :x: | :x: | :heavy_check_mark: |
### Solr Versions
@@ -48,6 +49,7 @@ If you want to skip versions when upgrading, be sure to check out the [upgrading
| `v0.4.x` | :grey_question: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| `v0.5.x` | :grey_question: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| `v0.6.x` | :grey_question: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
+| `v0.7.x` | :grey_question: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
Please note that this represents basic compatibility with the Solr Operator. There may be options and features that require newer versions of Solr.
@@ -107,6 +109,15 @@ _Note that the Helm chart version does not contain a `v` prefix, which the downl
## Upgrade Warnings and Notes
+### v0.7.0
+- **Kubernetes support is now limited to 1.21+.**
+ If you are unable to use a newer version of Kubernetes, please install the `v0.6.0` version of the Solr Operator for use with Kubernetes `1.20` and below.
+ See the [version compatibility matrix](#kubernetes-versions) for more information.
+
+- `PodDisruptionBudgets` are now created alongside SolrCloud instances.
+ The maximum number of pods allowed to be down at any given time is aligned with the [Managed Update settings](solr-cloud/solr-cloud-crd.md#update-strategy) provided in the spec.
+ If this is not provided, the default setting (`25%`) is used.
+
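As a rough illustration of the v0.7.0 note above, a SolrCloud that does not set `maxPodsUnavailable` would get a PDB along these lines. The name and selector labels are assumptions (the operator derives them from the cloud's StatefulSet); only the `25%` default comes from this change.

```yaml
# Sketch of the cloud-wide PDB created with the default setting; the metadata and labels
# below are assumptions, since the operator copies them from the StatefulSet it manages.
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: example-solrcloud      # assumed StatefulSet-derived name
  namespace: default
spec:
  maxUnavailable: "25%"        # the default when maxPodsUnavailable is not provided
  selector:
    matchLabels:
      solr-cloud: example      # assumed label from the StatefulSet's selector
```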
### v0.6.0
- The default Solr version for the `SolrCloud` and `SolrPrometheusExporter` resources has been upgraded from `8.9` to `8.11`.
This will not affect any existing resources, as default versions are hard-written to the resources immediately.
diff --git a/helm/solr-operator/Chart.yaml b/helm/solr-operator/Chart.yaml
index 55a9982..1da9c9c 100644
--- a/helm/solr-operator/Chart.yaml
+++ b/helm/solr-operator/Chart.yaml
@@ -17,7 +17,7 @@ name: solr-operator
description: The Solr Operator enables easy management of Solr resources within Kubernetes.
version: 0.7.0-prerelease
appVersion: v0.7.0-prerelease
-kubeVersion: ">= 1.19.0-0"
+kubeVersion: ">= 1.21.0-0"
home: https://solr.apache.org/operator
sources:
- https://github.com/apache/solr-operator
@@ -53,6 +53,11 @@ annotations:
# Add change log for a single release here.
# Allowed syntax is described at: https://artifacthub.io/docs/topics/annotations/helm/#example
artifacthub.io/changes: |
+ - kind: changed
+ description: Minimum Kubernetes version has been raised to 1.21
+ links:
+ - name: GitHub PR
+ url: https://github.com/apache/solr-operator/pull/473
- kind: fixed
description: Fix bug with named PVCs
links:
@@ -65,6 +70,15 @@ annotations:
links:
- name: GitHub PR
url: https://github.com/apache/solr-operator/pull/480
+ - kind: added
+ description: SolrClouds now have PodDisruptionBudgets enabled
+ links:
+ - name: GitHub Issue
+ url: https://github.com/apache/solr-operator/issues/471
+ - name: GitHub PR
+ url: https://github.com/apache/solr-operator/pull/473
+ - name: PodDisruptionBudget Documentation
+ url: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/#pod-disruption-budgets
artifacthub.io/images: |
- name: solr-operator
image: apache/solr-operator:v0.7.0-prerelease
diff --git a/helm/solr-operator/templates/role.yaml b/helm/solr-operator/templates/role.yaml
index 956b0aa..2f122e0 100644
--- a/helm/solr-operator/templates/role.yaml
+++ b/helm/solr-operator/templates/role.yaml
@@ -155,6 +155,18 @@ rules:
- ingresses/status
verbs:
- get
+- apiGroups:
+ - policy
+ resources:
+ - poddisruptionbudgets
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
- apiGroups:
- solr.apache.org
resources:
diff --git a/helm/solr/Chart.yaml b/helm/solr/Chart.yaml
index 64a86bf..6b89239 100644
--- a/helm/solr/Chart.yaml
+++ b/helm/solr/Chart.yaml
@@ -17,7 +17,7 @@ name: solr
description: A SolrCloud cluster running on Kubernetes via the Solr Operator
version: 0.7.0-prerelease
appVersion: 8.11.1
-kubeVersion: ">= 1.19.0-0"
+kubeVersion: ">= 1.21.0-0"
home: https://solr.apache.org
sources:
- https://github.com/apache/solr
@@ -39,6 +39,11 @@ annotations:
# Add change log for a single release here.
# Allowed syntax is described at: https://artifacthub.io/docs/topics/annotations/helm/#example
artifacthub.io/changes: |
+ - kind: changed
+ description: Minimum Kubernetes version has been raised to 1.21
+ links:
+ - name: GitHub PR
+ url: https://github.com/apache/solr-operator/pull/473
- kind: added
description: Support custom annotations on created ServiceAccount
links: