Unverified commit 549bb9b4, authored by Daniel Holbach, committed by GitHub

Merge pull request #165 from dholbach/prep-for-k8s-1.19

Prepare for k8s release 1.19 (Aug 25)
parents b024898e 3ebc2249
@@ -44,7 +44,7 @@ compatibility of one minor version between client and server:
 | kured  | kubectl | k8s.io/client-go | k8s.io/apimachinery | expected kubernetes compatibility |
 |--------|---------|------------------|---------------------|-----------------------------------|
-| master | 1.17.7  | v0.17.0          | v0.17.0             | 1.16.x, 1.17.x, 1.18.x            |
+| master | 1.18.8  | v0.18.8          | v0.18.8             | 1.17.x, 1.18.x, 1.19.x            |
 | 1.4.4  | 1.17.7  | v0.17.0          | v0.17.0             | 1.16.x, 1.17.x, 1.18.x            |
 | 1.3.0  | 1.15.10 | v12.0.0          | release-1.15        | 1.15.x, 1.16.x, 1.17.x            |
 | 1.2.0  | 1.13.6  | v10.0.0          | release-1.13        | 1.12.x, 1.13.x, 1.14.x            |
......
@@ -10,7 +10,7 @@ rules:
 # Allow kubectl to drain/uncordon
 #
 # NB: These permissions are tightly coupled to the bundled version of kubectl; the ones below
-# match https://github.com/kubernetes/kubernetes/blob/v1.12.1/pkg/kubectl/cmd/drain.go
+# match https://github.com/kubernetes/kubernetes/blob/v1.18.8/staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go
 #
 - apiGroups: [""]
   resources: ["nodes"]
......
-FROM alpine:3.11
+FROM alpine:3.12
 RUN apk update && apk add ca-certificates tzdata && rm -rf /var/cache/apk/*
 # NB: you may need to update RBAC permissions when upgrading kubectl - see kured-rbac.yaml for details
-ADD https://storage.googleapis.com/kubernetes-release/release/v1.17.7/bin/linux/amd64/kubectl /usr/bin/kubectl
+ADD https://storage.googleapis.com/kubernetes-release/release/v1.18.8/bin/linux/amd64/kubectl /usr/bin/kubectl
 RUN chmod 0755 /usr/bin/kubectl
 COPY ./kured /usr/bin/kured
 ENTRYPOINT ["/usr/bin/kured"]
 package main
 import (
+	"context"
 	"fmt"
 	"math/rand"
 	"net/http"
@@ -173,7 +174,7 @@ func rebootBlocked(client *kubernetes.Clientset, nodeID string) bool {
 	fieldSelector := fmt.Sprintf("spec.nodeName=%s", nodeID)
 	for _, labelSelector := range podSelectors {
-		podList, err := client.CoreV1().Pods("").List(metav1.ListOptions{
+		podList, err := client.CoreV1().Pods("").List(context.TODO(), metav1.ListOptions{
 			LabelSelector: labelSelector,
 			FieldSelector: fieldSelector,
 			Limit:         10})
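Background on the API change driving these edits: client-go v0.18 gave every generated client method a leading context.Context parameter (and explicit options structs for mutating calls), so each call site gains a context.TODO(). A minimal sketch of the new call shape, assuming a kubeconfig at a hypothetical path and an illustrative node name:

```go
package main

import (
	"context"
	"fmt"
	"log"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a clientset from a kubeconfig (path is an assumption).
	config, err := clientcmd.BuildConfigFromFlags("", "/root/.kube/config")
	if err != nil {
		log.Fatal(err)
	}
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		log.Fatal(err)
	}

	// client-go v0.18: List takes a context.Context first; v0.17 did not.
	pods, err := client.CoreV1().Pods("").List(context.TODO(), metav1.ListOptions{
		FieldSelector: "spec.nodeName=worker-1", // hypothetical node name
		Limit:         10,
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range pods.Items {
		fmt.Printf("%s/%s\n", p.Namespace, p.Name)
	}
}
```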
@@ -257,7 +258,7 @@ func uncordon(nodeID string) {
 }
 func commandReboot(nodeID string) {
-	log.Infof("Commanding reboot")
+	log.Infof("Commanding reboot for node: %s", nodeID)
 	if slackHookURL != "" {
 		if err := slack.NotifyReboot(slackHookURL, slackUsername, slackChannel, nodeID); err != nil {
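slack.NotifyReboot above is kured's own helper; a Slack incoming-webhook notification boils down to a JSON POST against the hook URL. A rough sketch of that idea — the package, function body, and message text below are illustrative, not kured's actual implementation:

```go
// Illustrative sketch of a Slack incoming-webhook notifier; kured's real
// slack package may differ in detail.
package slacknotify

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// NotifyReboot posts a reboot notice for nodeID to a Slack incoming webhook.
func NotifyReboot(hookURL, username, channel, nodeID string) error {
	payload, err := json.Marshal(map[string]string{
		"username": username,
		"channel":  channel,
		"text":     fmt.Sprintf("Rebooting node %s", nodeID),
	})
	if err != nil {
		return err
	}
	resp, err := http.Post(hookURL, "application/json", bytes.NewReader(payload))
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("slack webhook returned %s", resp.Status)
	}
	return nil
}
```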
@@ -313,7 +314,7 @@ func rebootAsRequired(nodeID string, window *timewindow.TimeWindow, TTL time.Dur
 	tick := delaytick.New(source, period)
 	for _ = range tick {
 		if window.Contains(time.Now()) && rebootRequired() && !rebootBlocked(client, nodeID) {
-			node, err := client.CoreV1().Nodes().Get(nodeID, metav1.GetOptions{})
+			node, err := client.CoreV1().Nodes().Get(context.TODO(), nodeID, metav1.GetOptions{})
 			if err != nil {
 				log.Fatal(err)
 			}
......
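context.TODO() is the mechanical choice for this migration: it satisfies the new signatures without making any call cancellable yet. A sketch of what a bounded variant of the Get above could look like — a hypothetical follow-up, not part of this PR:

```go
// Hypothetical follow-up: bound the API call with a timeout instead of the
// placeholder context.TODO(). Not part of this change.
package example

import (
	"context"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func getNodeWithTimeout(client *kubernetes.Clientset, nodeID string) (*corev1.Node, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel() // releases the timer if Get returns early
	return client.CoreV1().Nodes().Get(ctx, nodeID, metav1.GetOptions{})
}
```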
@@ -10,6 +10,6 @@ require (
 	github.com/prometheus/procfs v0.0.0-20190102135031-14fa7590c24d // indirect
 	github.com/sirupsen/logrus v1.2.0
 	github.com/spf13/cobra v0.0.0-20181127133106-d2d81d9a96e2
-	k8s.io/apimachinery v0.17.0
-	k8s.io/client-go v0.17.0
+	k8s.io/apimachinery v0.18.8
+	k8s.io/client-go v0.18.8
 )
@@ -8,7 +8,7 @@ rules:
 # Allow kubectl to drain/uncordon
 #
 # NB: These permissions are tightly coupled to the bundled version of kubectl; the ones below
-# match https://github.com/kubernetes/kubernetes/blob/v1.17.7/staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go
+# match https://github.com/kubernetes/kubernetes/blob/v1.18.8/staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go
 #
 - apiGroups: [""]
   resources: ["nodes"]
......
 package daemonsetlock
 import (
+	"context"
 	"encoding/json"
 	"fmt"
 	"time"
@@ -31,7 +32,7 @@ func New(client *kubernetes.Clientset, nodeID, namespace, name, annotation strin
 func (dsl *DaemonSetLock) Acquire(metadata interface{}, TTL time.Duration) (acquired bool, owner string, err error) {
 	for {
-		ds, err := dsl.client.AppsV1().DaemonSets(dsl.namespace).Get(dsl.name, metav1.GetOptions{})
+		ds, err := dsl.client.AppsV1().DaemonSets(dsl.namespace).Get(context.TODO(), dsl.name, metav1.GetOptions{})
 		if err != nil {
 			return false, "", err
 		}
@@ -60,7 +61,7 @@ func (dsl *DaemonSetLock) Acquire(metadata interface{}, TTL time.Duration) (acqu
 		}
 		ds.ObjectMeta.Annotations[dsl.annotation] = string(valueBytes)
-		_, err = dsl.client.AppsV1().DaemonSets(dsl.namespace).Update(ds)
+		_, err = dsl.client.AppsV1().DaemonSets(dsl.namespace).Update(context.TODO(), ds, metav1.UpdateOptions{})
 		if err != nil {
 			if se, ok := err.(*errors.StatusError); ok && se.ErrStatus.Reason == metav1.StatusReasonConflict {
 				// Something else updated the resource between us reading and writing - try again soon
@@ -75,7 +76,7 @@ func (dsl *DaemonSetLock) Acquire(metadata interface{}, TTL time.Duration) (acqu
 }
 func (dsl *DaemonSetLock) Test(metadata interface{}) (holding bool, err error) {
-	ds, err := dsl.client.AppsV1().DaemonSets(dsl.namespace).Get(dsl.name, metav1.GetOptions{})
+	ds, err := dsl.client.AppsV1().DaemonSets(dsl.namespace).Get(context.TODO(), dsl.name, metav1.GetOptions{})
 	if err != nil {
 		return false, err
 	}
@@ -99,7 +100,7 @@ func (dsl *DaemonSetLock) Test(metadata interface{}) (holding bool, err error) {
 func (dsl *DaemonSetLock) Release() error {
 	for {
-		ds, err := dsl.client.AppsV1().DaemonSets(dsl.namespace).Get(dsl.name, metav1.GetOptions{})
+		ds, err := dsl.client.AppsV1().DaemonSets(dsl.namespace).Get(context.TODO(), dsl.name, metav1.GetOptions{})
 		if err != nil {
 			return err
 		}
@@ -119,7 +120,7 @@ func (dsl *DaemonSetLock) Release() error {
 		delete(ds.ObjectMeta.Annotations, dsl.annotation)
-		_, err = dsl.client.AppsV1().DaemonSets(dsl.namespace).Update(ds)
+		_, err = dsl.client.AppsV1().DaemonSets(dsl.namespace).Update(context.TODO(), ds, metav1.UpdateOptions{})
 		if err != nil {
 			if se, ok := err.(*errors.StatusError); ok && se.ErrStatus.Reason == metav1.StatusReasonConflict {
 				// Something else updated the resource between us reading and writing - try again soon
......
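The conflict branch above is textbook optimistic concurrency: Get the DaemonSet at some resourceVersion, mutate the annotation, Update, and retry when another writer raced in between. A condensed sketch of that pattern against the v0.18 API, using the errors.IsConflict helper in place of the StatusError type assertion; the function name and one-second backoff are illustrative, and kured's real Acquire also checks TTL and lock ownership:

```go
package example

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// setAnnotation writes an annotation on a DaemonSet, retrying on update
// conflicts; condensed from the Acquire/Release loops above.
func setAnnotation(client *kubernetes.Clientset, namespace, name, key, value string) error {
	for {
		// Read the current object (and with it, its resourceVersion).
		ds, err := client.AppsV1().DaemonSets(namespace).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		if ds.ObjectMeta.Annotations == nil {
			ds.ObjectMeta.Annotations = map[string]string{}
		}
		ds.ObjectMeta.Annotations[key] = value

		// Write it back; the server rejects the update with a conflict
		// if someone else modified the object since our Get.
		_, err = client.AppsV1().DaemonSets(namespace).Update(context.TODO(), ds, metav1.UpdateOptions{})
		if err == nil {
			return nil
		}
		if errors.IsConflict(err) {
			time.Sleep(time.Second) // back off, then retry with a fresh read
			continue
		}
		return err
	}
}
```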