I'm creating a custom resource definition (CRD) with an associated controller using kubebuilder. My controller's reconcile loop creates a Deployment sub-resource and parents it to the custom resource using controllerutil.SetControllerReference(&myResource, deployment, r.Scheme). I've also configured my reconciler to "own" the sub-resource, as follows:
// SetupWithManager sets up the controller with the Manager.
func (r *MyResourceReconciler) SetupWithManager(mgr ctrl.Manager) error {
    return ctrl.NewControllerManagedBy(mgr).
        For(&mygroupv1alpha1.MyResource{}).
        Owns(&appsv1.Deployment{}).
        Complete(r)
}
However, when I run my controller locally using make run, I noticed that deleting my CR (the root object) doesn't cause the Deployment sub-resource to get garbage collected. I also noticed that deleting the Deployment sub-resource doesn't trigger my reconciler to run. Why is this? Is there something I'm not doing, or is this possibly a limitation of local development/testing?
Using @coderanger's hint, I could see that metadata.ownerReferences wasn't being set correctly by running the following command:
kubectl get deployments sample-deployment -o yaml
The problem was my controller's reconcile code. I was calling controllerutil.SetControllerReference(&myResource, deployment, r.Scheme) only after I'd already created and persisted the Deployment.
Buggy code
log.Info("Creating a deployment")
deployment := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: deploymentName,
Namespace: myResource.Namespace,
},
Spec: deploymentSpec,
}
if err = r.Create(ctx, deployment); err != nil {
log.Error(err, "Failed to create deployment")
if errors.IsInvalid(err) {
// Don't retry on validation errors
err = nil
}
return ctrl.Result{}, err
}
// Establish the parent-child relationship between my resource and the deployment
log.Info("Making my resource a parent of the deployment")
if err = controllerutil.SetControllerReference(&myResource, deployment, r.Scheme); err != nil {
log.Error(err, "Failed to set deployment controller reference")
return ctrl.Result{}, err
}
To fix it, I needed to swap the order of the calls to r.Create and controllerutil.SetControllerReference:
Working code
log.Info("Creating a deployment")
deployment := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: deploymentName,
Namespace: myResource.Namespace,
},
Spec: deploymentSpec,
}
// Establish the parent-child relationship between my resource and the deployment
log.Info("Making my resource a parent of the deployment")
if err = controllerutil.SetControllerReference(&myResource, deployment, r.Scheme); err != nil {
log.Error(err, "Failed to set deployment controller reference")
return ctrl.Result{}, err
}
// Create the deployment with the parent/child relationship configured
if err = r.Create(ctx, deployment); err != nil {
log.Error(err, "Failed to create deployment")
if errors.IsInvalid(err) {
// Don't retry on validation errors
err = nil
}
return ctrl.Result{}, err
}
I was able to confirm that this worked by looking at the metadata.ownerReferences YAML data for my created deployment (using the command referenced above).
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "1"
  creationTimestamp: "2021-08-02T16:22:04Z"
  generation: 1
  name: sample-deployment
  namespace: default
  ownerReferences:
  - apiVersion: resources.mydomain.io/v1alpha1
    blockOwnerDeletion: true
    controller: true
    kind: MyResource
    name: myresource-sample
    uid: 6ebb146c-afc7-4601-bd75-58efc29beac9
  resourceVersion: "569913"
  uid: d9a4496f-7418-4831-ab87-4804dcd1f8aa
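As an aside, another way to avoid this ordering mistake is to set the owner reference inside a mutate callback so it is always applied before the object is persisted. A minimal sketch (assuming a controller-runtime version that provides controllerutil.CreateOrUpdate and the usual kubebuilder scaffolding where the reconciler embeds client.Client, reusing the deployment object built above):

op, err := controllerutil.CreateOrUpdate(ctx, r.Client, deployment, func() error {
    deployment.Spec = deploymentSpec
    // The owner reference is set before controller-runtime issues the Create or Update
    return controllerutil.SetControllerReference(&myResource, deployment, r.Scheme)
})
if err != nil {
    log.Error(err, "Failed to create or update deployment")
    return ctrl.Result{}, err
}
log.Info("Reconciled deployment", "operation", op)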
Related
Kubernetes events seem to have options for watching all kinds of things like pod up/down and creation/deletion, but I want to watch for namespace creation/deletion itself, and I can't find an option to do that.
I want to know if someone created a namespace or deleted one. Is this possible?
Rgds,
Gopa.
So I got it working, pasting the entire code below for anyone's future reference
package main

import (
    "os"
    "os/signal"
    "syscall"
    "time"

    "github.com/golang/glog"
    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/runtime"
    "k8s.io/client-go/informers"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/cache"
    "k8s.io/client-go/tools/clientcmd"
)

func newNamespace(obj interface{}) {
    ns := obj.(v1.Object)
    glog.Error("New Namespace ", ns.GetName())
}

func modNamespace(objOld interface{}, objNew interface{}) {
}

func delNamespace(obj interface{}) {
    ns := obj.(v1.Object)
    glog.Error("Del Namespace ", ns.GetName())
}

func watchNamespace(k8s *kubernetes.Clientset) {
    // Add watcher for the Namespace.
    factory := informers.NewSharedInformerFactory(k8s, 5*time.Second)
    nsInformer := factory.Core().V1().Namespaces().Informer()
    nsInformerChan := make(chan struct{})
    //defer close(nsInformerChan)
    defer runtime.HandleCrash()
    // Namespace informer state change handler
    nsInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
        // When a new namespace gets created
        AddFunc: func(obj interface{}) {
            newNamespace(obj)
        },
        // When a namespace gets updated
        UpdateFunc: func(oldObj interface{}, newObj interface{}) {
            modNamespace(oldObj, newObj)
        },
        // When a namespace gets deleted
        DeleteFunc: func(obj interface{}) {
            delNamespace(obj)
        },
    })
    factory.Start(nsInformerChan)
    //go nsInformer.GetController().Run(nsInformerChan)
    go nsInformer.Run(nsInformerChan)
}

func main() {
    kconfig := os.Getenv("KUBECONFIG")
    glog.Error("KCONFIG", kconfig)
    var config *rest.Config
    var clientset *kubernetes.Clientset
    var err error
    for {
        if config == nil {
            config, err = clientcmd.BuildConfigFromFlags("", kconfig)
            if err != nil {
                glog.Error("Can't create kubernetes config")
                time.Sleep(time.Second)
                continue
            }
        }
        // creates the clientset
        clientset, err = kubernetes.NewForConfig(config)
        if err != nil {
            glog.Error("Cannot create kubernetes client")
            time.Sleep(time.Second)
            continue
        }
        break
    }
    watchNamespace(clientset)
    glog.Error("Watch started")
    term := make(chan os.Signal, 1)
    signal.Notify(term, os.Interrupt)
    signal.Notify(term, syscall.SIGTERM)
    select {
    case <-term:
    }
}
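One small note on the code above: factory.Start already starts every informer created from that factory in its own goroutine, so the extra go nsInformer.Run(nsInformerChan) call shouldn't be needed. A slightly tidier ending for watchNamespace (same behaviour, just waiting for the initial cache sync before returning) could be:

// Start all informers created from the factory and wait for their caches to sync
factory.Start(nsInformerChan)
factory.WaitForCacheSync(nsInformerChan)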
Events are namespaced; they show the events happening in a particular namespace. However, -A|--all-namespaces would show events from all the namespaces.
If you want to track the lifecycle (creation, deletion, etc.) of the namespaces themselves, then audit logs are the simplest option, if not the only option.
Example: creation of a namespace called my-ns:
kubectl create ns my-ns
kubectl get events -n my-ns
No resources found in my-ns namespace.
kubectl describe ns my-ns
Name: my-ns
Labels: <none>
Annotations: <none>
Status: Active
No resource quota.
No LimitRange resource.
Now here is the output of the audit.log at the Metadata level. This would tell you the following:
who created it
what was created
when it was created
and a lot more.
Example output:
{
  "kind": "Event",
  "apiVersion": "audit.k8s.io/v1",
  "level": "Metadata",
  "auditID": "d28619be-0cb7-4d3e-b195-51fb93ae6de4",
  "stage": "ResponseComplete",
  "requestURI": "/api/v1/namespaces?fieldManager=kubectl-create",
  "verb": "create",                 #<------ operation type
  "user": {
    "username": "kubernetes-admin", #<------ who created
    "groups": [
      "system:masters",
      "system:authenticated"
    ]
  },
  "sourceIPs": [
    "1.2.3.4"
  ],
  "userAgent": "kubectl/v1.20.0 (linux/amd64) kubernetes/af46c47",
  "objectRef": {
    "resource": "namespaces",       #<------ what was created
    "name": "my-ns",                #<------ name of the resource
    "apiVersion": "v1"
  },
  "responseStatus": {
    "metadata": {},
    "code": 201
  },
  "requestReceivedTimestamp": "2021-09-24T16:44:28.094213Z",  #<------ when created
  "stageTimestamp": "2021-09-24T16:44:28.270294Z",
  "annotations": {
    "authorization.k8s.io/decision": "allow",
    "authorization.k8s.io/reason": ""
  }
}
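For completeness, audit logging has to be enabled on the API server (typically via the --audit-policy-file and --audit-log-path flags), and a minimal policy that records namespace operations at the Metadata level could look roughly like this (a sketch, not a production-ready policy):

apiVersion: audit.k8s.io/v1
kind: Policy
rules:
  # Record requests against namespaces (core API group) at the Metadata level
  - level: Metadata
    resources:
      - group: ""
        resources: ["namespaces"]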
$ kubectl get ns --watch-only
# run "kubectl create ns test" from another terminal
test Active 0s
# run "kubectl delete ns test"
test Terminating 23s
test Terminating 28s
test Terminating 28s
I was following @ymmt2005's excellent dynamic client guide. All is good until the final step, when I make the actual PATCH call and get a "the server could not find the requested resource" error. Just about everything seems right, except I'm unsure about the FieldManager field in the PatchOptions struct. I'm not sure what "the actor or entity that is making these changes" refers to. Does this need to match something in my code or system? Any other ideas?
package main

import (
    ...
)

const resourceYAML = `
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mike-nginx
spec:
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: 'nginx:latest'
        ports:
        - containerPort: 80
`

func main() {
    ctx := context.Background()

    // Create dynamic and discovery clients from local kubeconfig file
    kubePath := filepath.Join(homedir.HomeDir(), ".kube", "config")
    cfg, err := clientcmd.BuildConfigFromFlags("", kubePath)
    if err != nil {
        log.Fatalf("error building config, %v\n", err)
    }
    dynClient, err := dynamic.NewForConfig(cfg)
    if err != nil {
        log.Fatalf("error creating client, %v\n", err)
    }
    disClient, err := discovery.NewDiscoveryClientForConfig(cfg)
    if err != nil {
        log.Fatalf("error creating discovery client, %v\n", err)
    }

    // Decode YAML manifest & get GVK
    decodeUnstr := yaml.NewDecodingSerializer(unstructured.UnstructuredJSONScheme)
    obj := &unstructured.Unstructured{}
    _, gvk, err := decodeUnstr.Decode([]byte(resourceYAML), nil, obj)
    if err != nil {
        log.Fatalf("error decoding manifest, %v\n", err)
    }
    jsonObj, err := json.Marshal(obj)
    if err != nil {
        log.Fatalf("error marshaling object, %v\n", err)
    }

    // Find GVR using GVK
    mapper := restmapper.NewDeferredDiscoveryRESTMapper(memory.NewMemCacheClient(disClient))
    mapping, err := mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
    if err != nil {
        log.Fatalf("error finding GVR, %v\n", err)
    }

    // Get REST interface for the GVR, checking for namespaced or cluster-wide scope
    var dr dynamic.ResourceInterface
    if mapping.Scope.Name() == meta.RESTScopeNameNamespace {
        // Namespaced resource
        dr = dynClient.Resource(mapping.Resource).Namespace(obj.GetNamespace())
    } else {
        // Cluster-wide resource
        dr = dynClient.Resource(mapping.Resource)
    }

    // Create or update the object with SSA
    options := metav1.PatchOptions{FieldManager: "sample-controller"}
    _, err = dr.Patch(ctx, obj.GetName(), types.ApplyPatchType, jsonObj, options)
    if err != nil {
        log.Fatalf("error patching, %v\n", err)
    }
}
[edit] I confirmed that I was only able to use Patch on a resource that already existed. I tweaked the code to use Create to create the resource, and then I was able to successfully do a Patch against it to make changes. To overcome the FieldManager inconsistencies I added Force=true to the PatchOptions, which is recommended in the docs anyway. I'd still like to know how I can create the resource if it doesn't exist and update it if it does--maybe just test for existence?
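For reference, Force on metav1.PatchOptions is a *bool, so the tweak described in the edit above looks roughly like this (same field manager name as in the code):

force := true
options := metav1.PatchOptions{FieldManager: "sample-controller", Force: &force}
_, err = dr.Patch(ctx, obj.GetName(), types.ApplyPatchType, jsonObj, options)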
The answer is really trivial. The original code assumes that the namespace is provided in the manifest. The deployment endpoint does not automatically set the namespace to default if the provided namespace is "", and errors out because "" is not a valid namespace. Therefore, I added logic to set the namespace to default if not provided, and presto, the server-side apply will create the resource if it doesn't exist and update it if it does. Thanks again @ymmt2005.
package main

import (
    ...
)

const resourceYAML = `
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mike-nginx
spec:
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: 'nginx:latest'
        ports:
        - containerPort: 80
`

func main() {
    ctx := context.Background()

    // Create dynamic and discovery clients from local kubeconfig file
    kubePath := filepath.Join(homedir.HomeDir(), ".kube", "config")
    cfg, err := clientcmd.BuildConfigFromFlags("", kubePath)
    if err != nil {
        log.Fatalf("error building config, %v\n", err)
    }
    dynClient, err := dynamic.NewForConfig(cfg)
    if err != nil {
        log.Fatalf("error creating client, %v\n", err)
    }
    disClient, err := discovery.NewDiscoveryClientForConfig(cfg)
    if err != nil {
        log.Fatalf("error creating discovery client, %v\n", err)
    }

    // Decode YAML manifest & get GVK
    decodeUnstr := yaml.NewDecodingSerializer(unstructured.UnstructuredJSONScheme)
    obj := &unstructured.Unstructured{}
    _, gvk, err := decodeUnstr.Decode([]byte(resourceYAML), nil, obj)
    if err != nil {
        log.Fatalf("error decoding manifest, %v\n", err)
    }
    jsonObj, err := json.Marshal(obj)
    if err != nil {
        log.Fatalf("error marshaling object, %v\n", err)
    }

    // Find GVR using GVK
    mapper := restmapper.NewDeferredDiscoveryRESTMapper(memory.NewMemCacheClient(disClient))
    mapping, err := mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
    if err != nil {
        log.Fatalf("error finding GVR, %v\n", err)
    }

    // Set Namespace to default if not provided in manifest
    var ns string
    if ns = obj.GetNamespace(); ns == "" {
        ns = "default"
    }

    // Get REST interface for the GVR, checking for namespaced or cluster-wide scope
    var dr dynamic.ResourceInterface
    if mapping.Scope.Name() == meta.RESTScopeNameNamespace {
        // Namespaced resource
        dr = dynClient.Resource(mapping.Resource).Namespace(ns)
    } else {
        // Cluster-wide resource
        dr = dynClient.Resource(mapping.Resource)
    }

    // Create or update the object with SSA
    options := metav1.PatchOptions{FieldManager: "sample-controller"}
    _, err = dr.Patch(ctx, obj.GetName(), types.ApplyPatchType, jsonObj, options)
    if err != nil {
        log.Fatalf("error patching, %v\n", err)
    }
}
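As a small optional polish (not part of the original fix), the metav1 package already exposes the default namespace name as a constant, so the hard-coded string could be replaced:

if ns = obj.GetNamespace(); ns == "" {
    ns = metav1.NamespaceDefault // "default"
}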
Is there a way to restart a Kubernetes deployment using client-go? I have no idea how to achieve this; please help!
If you run kubectl rollout restart deployment/my-deploy -v=10, you see that kubectl actually sends a PATCH request to the API server and sets .spec.template.metadata.annotations with something like this:
kubectl.kubernetes.io/restartedAt: '2022-11-29T16:33:08+03:30'
So, you can do the same with client-go:
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
    // Do something with err
}

deploymentsClient := clientset.AppsV1().Deployments(namespace)

data := fmt.Sprintf(`{"spec": {"template": {"metadata": {"annotations": {"kubectl.kubernetes.io/restartedAt": "%s"}}}}}`, time.Now().Format("20060102150405"))
deployment, err := deploymentsClient.Patch(ctx, deployment_name, k8stypes.StrategicMergePatchType, []byte(data), v1.PatchOptions{})
if err != nil {
    // Do something with err
}
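If you want the annotation value to match what kubectl itself writes (an RFC 3339 timestamp, as in the example above), time.RFC3339 can be used as the format string instead:

data := fmt.Sprintf(`{"spec": {"template": {"metadata": {"annotations": {"kubectl.kubernetes.io/restartedAt": "%s"}}}}}`, time.Now().Format(time.RFC3339))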
I've declared a Kubernetes secret in Pulumi like this:
const tlsSecret = new k8s.core.v1.Secret('tlsSecret', {
    metadata: {
        name: 'star.builds.qwil.co'
    },
    data: {
        'tls.crt': tlsCert,
        'tls.key': tlsKey
    }
});
However, I'm finding that when the secret is created, only tls.key is present in it. When I look at the Diff View for the Pulumi deployment on app.pulumi.com, I see the following:
  tlsSecret (kubernetes:core:Secret)
+ kubernetes:core/v1:Secret (create)
    [urn=urn:pulumi:local::ledger::kubernetes:core/v1:Secret::tlsSecret]
    apiVersion: "v1"
    data      : {
        tls.key: "[secret]"
    }
    kind      : "Secret"
    metadata  : {
        labels: {
            app.kubernetes.io/managed-by: "pulumi"
        }
        name  : "star.builds.qwil.co"
    }
Why is only tls.key being picked up even though I've also specified a tls.crt?
Turns out the variable tlsCert was falsy (I was loading it from config with the wrong key for Config.get()). Pulumi was smart and didn't create a secret entry for an empty string.
I'm having a bit of a challenge trying to build my app, which uses the golang client-go library. The app provides an API which then deploys a pod to a Kubernetes cluster. The app is able to deploy a pod successfully if I use an out-of-cluster Kubernetes config (i.e. minikube), which is found in $HOME/.kube/config. See the code below that determines which config to use depending on the config path:
package kubernetesinterface

import (
    "log"
    "os"

    core "k8s.io/api/core/v1"
    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    _ "k8s.io/client-go/plugin/pkg/client/auth" // load auth packages
    "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/clientcmd"
)

// KubeStruct - struct that uses interface type (useful when testing)
type KubeStruct struct {
    clientset kubernetes.Interface
}

// DeployPod - method that uses a KubeStruct type to deploy a simulator pod to a kubernetes cluster
func (kube *KubeStruct) DeployPod() bool {
    var podObject *core.Pod
    podObject = createPodObjects()
    _, err := kube.clientset.Core().Pods(podObject.Namespace).Create(podObject)
    if err != nil {
        log.Println("Failed to create simulator pod: ", err.Error())
        return false
    }
    return true
}

// GetNewClient - function to create a new clientset object to connect to a kubernetes cluster
func GetNewClient() (*KubeStruct, error) {
    var kubeConfig *rest.Config
    var err error
    configPath := os.Getenv("CONFIG_PATH")
    if configPath == "" {
        log.Println("Using in-cluster configuration")
        kubeConfig, err = rest.InClusterConfig()
    } else {
        log.Println("Using out of cluster config")
        kubeConfig, err = clientcmd.BuildConfigFromFlags("", configPath)
    }
    if err != nil {
        log.Println("Error getting configuration ", err.Error())
        return nil, err
    }
    // create clientset for kubernetes cluster
    client := KubeStruct{}
    client.clientset, err = kubernetes.NewForConfig(kubeConfig)
    if err != nil {
        log.Println("Error creating clientset for kubernetes cluster ", err.Error())
        return nil, err
    }
    return &client, nil
}

func createPodObjects() *core.Pod {
    return &core.Pod{
        ObjectMeta: v1.ObjectMeta{
            Name:      "podname",
            Namespace: "default",
            Labels: map[string]string{
                "app": "podname",
            },
        },
        Spec: core.PodSpec{
            Containers: []core.Container{
                {
                    Name:            "podname",
                    Image:           os.Getenv("IMAGE"),
                    ImagePullPolicy: core.PullIfNotPresent,
                    Command: []string{
                        "sleep",
                        "3600",
                    },
                },
            },
        },
    }
}
So if a value exists for CONFIG_PATH, the app works as expected and a pod is deployed to my minikube cluster. However, when the same app is built on GCP, I get the following build error:
Step #1: 2019/03/13 21:25:20 Error getting configuration unable to load in-cluster configuration, KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT must be defined
I have searched online unsuccessfully for a solution, so I thought I'd post here.
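One pattern worth checking (a sketch against GetNewClient above, assuming the binary may run both in and out of a cluster; not necessarily a fix for the build step itself) is to try the in-cluster config first and fall back to the default kubeconfig path whenever client-go reports it is not running inside a cluster, rather than keying off CONFIG_PATH alone:

var kubeConfig *rest.Config
var err error
kubeConfig, err = rest.InClusterConfig()
if err == rest.ErrNotInCluster {
    // Not running inside a cluster; fall back to the default kubeconfig path ($HOME/.kube/config)
    kubeConfig, err = clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
}
if err != nil {
    log.Println("Error getting configuration ", err.Error())
    return nil, err
}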