Kubernetes: need notification if a new namespace is created

So Kubernetes events seem to have options for watching all kinds of things, like pod creation/deletion, pods going up or down, etc., but I can't find an option to watch for namespace creation/deletion itself.
I want to know if someone created a namespace or deleted one. Is this possible?
Rgds,
Gopa.

So I got it working; I'm pasting the entire code below for anyone's future reference:
package main

import (
	"os"
	"os/signal"
	"syscall"
	"time"

	"github.com/golang/glog"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
)

// newNamespace is called by the informer whenever a namespace is added.
func newNamespace(obj interface{}) {
	ns := obj.(v1.Object)
	glog.Error("New Namespace ", ns.GetName())
}

// modNamespace is called on namespace updates (not needed here).
func modNamespace(objOld interface{}, objNew interface{}) {
}

// delNamespace is called by the informer whenever a namespace is deleted.
func delNamespace(obj interface{}) {
	ns := obj.(v1.Object)
	glog.Error("Del Namespace ", ns.GetName())
}

func watchNamespace(k8s *kubernetes.Clientset) {
	// Add watcher for the Namespace.
	factory := informers.NewSharedInformerFactory(k8s, 5*time.Second)
	nsInformer := factory.Core().V1().Namespaces().Informer()
	nsInformerChan := make(chan struct{})
	//defer close(nsInformerChan)
	defer runtime.HandleCrash()

	// Namespace informer state change handler
	nsInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		// When a new namespace gets created
		AddFunc: func(obj interface{}) {
			newNamespace(obj)
		},
		// When a namespace gets updated
		UpdateFunc: func(oldObj interface{}, newObj interface{}) {
			modNamespace(oldObj, newObj)
		},
		// When a namespace gets deleted
		DeleteFunc: func(obj interface{}) {
			delNamespace(obj)
		},
	})
	factory.Start(nsInformerChan)
	//go nsInformer.GetController().Run(nsInformerChan)
	go nsInformer.Run(nsInformerChan)
}

func main() {
	kconfig := os.Getenv("KUBECONFIG")
	glog.Error("KCONFIG", kconfig)

	var config *rest.Config
	var clientset *kubernetes.Clientset
	var err error

	// Retry until both the config and the clientset can be created.
	for {
		if config == nil {
			config, err = clientcmd.BuildConfigFromFlags("", kconfig)
			if err != nil {
				glog.Error("Can't create kubernetes config")
				time.Sleep(time.Second)
				continue
			}
		}
		// creates the clientset
		clientset, err = kubernetes.NewForConfig(config)
		if err != nil {
			glog.Error("Cannot create kubernetes client")
			time.Sleep(time.Second)
			continue
		}
		break
	}

	watchNamespace(clientset)
	glog.Error("Watch started")

	// Block until SIGINT/SIGTERM.
	term := make(chan os.Signal, 1)
	signal.Notify(term, os.Interrupt)
	signal.Notify(term, syscall.SIGTERM)
	<-term
}

Events are namespaced; they show the events happening in a particular namespace. However, -A|--all-namespaces shows events from all namespaces. If you want to track the lifecycle (creation, deletion, etc.) of the namespaces themselves, then audit logs are the simplest option, if not the only option.
Example: creation of a namespace called my-ns:
kubectl create ns my-ns
kubectl get events -n my-ns
No resources found in my-ns namespace.
kubectl describe ns my-ns
Name: my-ns
Labels: <none>
Annotations: <none>
Status: Active
No resource quota.
No LimitRange resource.
Now here is the output of audit.log at the Metadata level. It tells you the following:
who created it
what was created
when it was created
and a lot more.
Example output:
{
  "kind": "Event",
  "apiVersion": "audit.k8s.io/v1",
  "level": "Metadata",
  "auditID": "d28619be-0cb7-4d3e-b195-51fb93ae6de4",
  "stage": "ResponseComplete",
  "requestURI": "/api/v1/namespaces?fieldManager=kubectl-create",
  "verb": "create",                    #<------ operation type
  "user": {
    "username": "kubernetes-admin",    #<------ who created it
    "groups": [
      "system:masters",
      "system:authenticated"
    ]
  },
  "sourceIPs": [
    "1.2.3.4"
  ],
  "userAgent": "kubectl/v1.20.0 (linux/amd64) kubernetes/af46c47",
  "objectRef": {
    "resource": "namespaces",          #<------ what was created
    "name": "my-ns",                   #<------ name of the resource
    "apiVersion": "v1"
  },
  "responseStatus": {
    "metadata": {},
    "code": 201
  },
  "requestReceivedTimestamp": "2021-09-24T16:44:28.094213Z",   #<------ when it was created
  "stageTimestamp": "2021-09-24T16:44:28.270294Z",
  "annotations": {
    "authorization.k8s.io/decision": "allow",
    "authorization.k8s.io/reason": ""
  }
}
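If you want to consume those audit entries programmatically, here is a minimal Go sketch that scans an audit log file and prints namespace creations/deletions. The log path is an assumption (it depends on how your API server's audit backend is configured), and only the fields shown above are parsed:

package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"os"
)

// auditEvent holds only the fields we care about from audit.k8s.io/v1 Event lines.
type auditEvent struct {
	Verb string `json:"verb"`
	User struct {
		Username string `json:"username"`
	} `json:"user"`
	ObjectRef struct {
		Resource string `json:"resource"`
		Name     string `json:"name"`
	} `json:"objectRef"`
	RequestReceivedTimestamp string `json:"requestReceivedTimestamp"`
}

func main() {
	// Hypothetical audit log location; adjust to your audit backend configuration.
	f, err := os.Open("/var/log/kubernetes/audit/audit.log")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		var ev auditEvent
		if err := json.Unmarshal(scanner.Bytes(), &ev); err != nil {
			continue // skip lines that are not audit events
		}
		// Only report namespace creations and deletions: who, what, when.
		if ev.ObjectRef.Resource == "namespaces" && (ev.Verb == "create" || ev.Verb == "delete") {
			fmt.Printf("%s %sd namespace %q at %s\n",
				ev.User.Username, ev.Verb, ev.ObjectRef.Name, ev.RequestReceivedTimestamp)
		}
	}
}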

$ kubectl get ns --watch-only
# run "kubectl create ns test" from another terminal
test Active 0s
# run "kubectl delete ns test"
test Terminating 23s
test Terminating 28s
test Terminating 28s
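In client-go, the same watch can be done roughly like the sketch below, using the plain Watch API rather than an informer. The kubeconfig path is a placeholder, and there is no caching or automatic reconnect, so for anything long-running the informer approach from the answer above is more robust:

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Hypothetical kubeconfig path; adjust to your environment.
	config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// Watch namespaces cluster-wide; each event reports Added/Modified/Deleted.
	watcher, err := clientset.CoreV1().Namespaces().Watch(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	defer watcher.Stop()

	for event := range watcher.ResultChan() {
		ns, ok := event.Object.(*corev1.Namespace)
		if !ok {
			continue
		}
		fmt.Println(event.Type, ns.Name, ns.Status.Phase)
	}
}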

Related

Does anyone know the way to do this with client-go, or the API resources that kubectl describe pod uses?

I cannot find the appropriate method to do this.
Does anyone know the way to do this with client-go, or the API resources that kubectl describe pod uses?
Here is example code for getting a pod with client-go:
/*
A demonstration of get pod using client-go
Based on client-go examples: https://github.com/kubernetes/client-go/tree/master/examples
To demonstrate, run this file with `go run <filename> --help` to see usage
*/
package main

import (
	"context"
	"flag"
	"fmt"
	"os"
	"path/filepath"

	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/util/homedir"
)

func main() {
	podName := flag.String("pod-name", "", "name of the required pod")
	namespaceName := flag.String("namespace", "", "namespace of the required pod")

	var kubeconfig *string
	if config, exist := os.LookupEnv("KUBECONFIG"); exist {
		kubeconfig = &config
	} else if home := homedir.HomeDir(); home != "" {
		kubeconfig = flag.String("kubeconfig", filepath.Join(home, ".kube", "config"), "(optional) absolute path to the kubeconfig file")
	} else {
		kubeconfig = flag.String("kubeconfig", "", "absolute path to the kubeconfig file")
	}
	flag.Parse()

	config, err := clientcmd.BuildConfigFromFlags("", *kubeconfig)
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	podClient := clientset.CoreV1().Pods(*namespaceName)
	fmt.Println("Getting pod...")
	result, err := podClient.Get(context.TODO(), *podName, v1.GetOptions{})
	if err != nil {
		panic(err)
	}

	// Example fields
	fmt.Printf("%+v\n", result.Name)
	fmt.Printf("%+v\n", result.Namespace)
	fmt.Printf("%+v\n", result.Spec.ServiceAccountName)
}
You can see how kubectl gathers this information in the code of the kubectl describe command.
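As a rough illustration of what kubectl describe pod pulls together (this is a sketch, not the exact kubectl code), the following fetches the pod and then lists the Events that reference it; the namespace and pod name below are placeholders:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Hypothetical kubeconfig path; adjust to your environment.
	config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	namespace, podName := "default", "my-pod" // placeholder names

	// The pod itself, as shown in the top part of `kubectl describe pod`.
	pod, err := clientset.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("Node:", pod.Spec.NodeName, "Phase:", pod.Status.Phase)

	// Events referencing this pod, similar to the Events section of `kubectl describe`.
	events, err := clientset.CoreV1().Events(namespace).List(context.TODO(), metav1.ListOptions{
		FieldSelector: fmt.Sprintf("involvedObject.name=%s,involvedObject.kind=Pod", podName),
	})
	if err != nil {
		panic(err)
	}
	for _, e := range events.Items {
		fmt.Printf("%s\t%s\t%s\n", e.Type, e.Reason, e.Message)
	}
}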

Dynamically add label to Pods in Kubernetes with Client-go

I want to create a controller that listens to pod events, and when a new pod is created (by a deployment) adds all the labels belonging to that deployment to the created pod. Is this possible at scale with client-go?
In order to observe pod events, you need to use informers. Informers have built-in optimizations to avoid overloading API servers.
There is a Patch method available in the PodInterface that allows you to add a label to a pod.
Here is sample code for your reference. In the main function the informer is set up, and the handleAdd function implements the labeling logic.
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
)

// patchStringValue describes a single JSON Patch operation on a string value.
type patchStringValue struct {
	Op    string `json:"op"`
	Path  string `json:"path"`
	Value string `json:"value"`
}

func main() {
	clientSet := GetK8sClient()

	// Only watch pods that match the deployment's label selector.
	labelOptions := informers.WithTweakListOptions(func(opts *metav1.ListOptions) {
		opts.LabelSelector = GetLabelSelectorForDeployment("deployment-name", "namespace-name")
	})
	factory := informers.NewSharedInformerFactoryWithOptions(clientSet, 10*time.Second, informers.WithNamespace("namespace-name"), labelOptions)

	podInformer := factory.Core().V1().Pods()
	podInformer.Informer().AddEventHandler(
		cache.ResourceEventHandlerFuncs{
			AddFunc: handleAdd,
		},
	)

	factory.Start(wait.NeverStop)
	factory.WaitForCacheSync(wait.NeverStop)

	// Block forever so the informer keeps processing events.
	select {}
}

// GetLabelSelectorForDeployment returns the label selector string of the given deployment.
func GetLabelSelectorForDeployment(Name string, Namespace string) string {
	clientSet := GetK8sClient()
	k8sClient := clientSet.AppsV1()
	deployment, err := k8sClient.Deployments(Namespace).Get(context.Background(), Name, metav1.GetOptions{})
	if err != nil {
		panic(err.Error())
	}
	labelSet := labels.Set(deployment.Spec.Selector.MatchLabels)
	return labelSet.AsSelector().String()
}

// handleAdd patches every newly observed pod with a test label.
func handleAdd(obj interface{}) {
	k8sClient := GetK8sClient().CoreV1()
	pod := obj.(*v1.Pod)
	fmt.Println("Pod", pod.GetName(), pod.Spec.NodeName, pod.Spec.Containers)

	// "add" creates the label if it does not exist yet; a "replace" op would
	// fail on a pod that does not already carry this label.
	payload := []patchStringValue{{
		Op:    "add",
		Path:  "/metadata/labels/testLabel",
		Value: "testValue",
	}}
	payloadBytes, _ := json.Marshal(payload)

	_, updateErr := k8sClient.Pods(pod.GetNamespace()).Patch(context.Background(), pod.GetName(), types.JSONPatchType, payloadBytes, metav1.PatchOptions{})
	if updateErr == nil {
		fmt.Println(fmt.Sprintf("Pod %s labelled successfully.", pod.GetName()))
	} else {
		fmt.Println(updateErr)
	}
}

// GetK8sClient creates a clientset from the in-cluster configuration.
func GetK8sClient() *kubernetes.Clientset {
	config, err := rest.InClusterConfig()
	if err != nil {
		panic(err.Error())
	}
	// creates the clientset
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err.Error())
	}
	return clientset
}
What about doing it manually with kubectl label?
Here are the pods before they are labeled manually:
❯ kubectl get po --show-labels
NAME READY STATUS RESTARTS AGE LABELS
nginx-deploy-6bdc4445fd-5qlhg 1/1 Running 0 13h app=nginx,pod-template-hash=6bdc4445fd
nginx-deploy-6bdc4445fd-pgkhb 1/1 Running 0 13h app=nginx,pod-template-hash=6bdc4445fd
nginx-deploy-6bdc4445fd-xdz59 1/1 Running 0 13h app=nginx,pod-template-hash=6bdc4445fd
Here I label the pod "nginx-deploy-6bdc4445fd-5qlhg" with the label "test=2":
❯ kubectl label pod nginx-deploy-6bdc4445fd-5qlhg test=2
pod/nginx-deploy-6bdc4445fd-5qlhg labeled
Here are the pods after they are labeled manually:
❯ kubectl get po --show-labels
NAME READY STATUS RESTARTS AGE LABELS
nginx-deploy-6bdc4445fd-5qlhg 1/1 Running 0 13h app=nginx,pod-template-hash=6bdc4445fd,test=2
nginx-deploy-6bdc4445fd-pgkhb 1/1 Running 0 13h app=nginx,pod-template-hash=6bdc4445fd
nginx-deploy-6bdc4445fd-xdz59 1/1 Running 0 13h app=nginx,pod-template-hash=6bdc4445fd

Using K8S API to access pod [closed]

With kubectl we can run the following command:
kubectl exec -ti POD_NAME -- pwd
Can I do that at the API level? I checked the Pod API and it seems to be missing there: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/
What I am looking for is a UI tool to view the files in a pod without extra dependencies.
UPDATE:
I found the following code to exec a command in a pod:
package main

import (
	"bytes"
	"context"
	"flag"
	"fmt"
	"path/filepath"

	corev1 "k8s.io/api/core/v1"
	_ "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/tools/remotecommand"
	"k8s.io/client-go/util/homedir"
	//
	// Uncomment to load all auth plugins
	// _ "k8s.io/client-go/plugin/pkg/client/auth"
	//
	// Or uncomment to load specific auth plugins
	// _ "k8s.io/client-go/plugin/pkg/client/auth/azure"
	// _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
	// _ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
	// _ "k8s.io/client-go/plugin/pkg/client/auth/openstack"
)

func main() {
	var kubeconfig *string
	if home := homedir.HomeDir(); home != "" {
		kubeconfig = flag.String("kubeconfig", filepath.Join(home, ".kube", "config"), "(optional) absolute path to the kubeconfig file")
	} else {
		kubeconfig = flag.String("kubeconfig", "", "absolute path to the kubeconfig file")
	}
	flag.Parse()

	// use the current context in kubeconfig
	config, err := clientcmd.BuildConfigFromFlags("", *kubeconfig)
	if err != nil {
		panic(err.Error())
	}
	// create the clientset
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err.Error())
	}

	namespace := "stage"
	pods, err := clientset.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err.Error())
	}
	fmt.Printf("There are %d pods in the namespace\n", len(pods.Items))

	podName := "ubs-job-qa-0"
	containerName := "ubs-job"

	// https://github.com/kubernetes/kubernetes/blob/release-1.22/test/e2e/framework/exec_util.go
	// https://zhimin-wen.medium.com/programing-exec-into-a-pod-5f2a70bd93bb
	req := clientset.CoreV1().
		RESTClient().
		Post().
		Resource("pods").
		Name(podName).
		Namespace(namespace).
		SubResource("exec").
		Param("container", containerName)

	scheme := runtime.NewScheme()
	if err := corev1.AddToScheme(scheme); err != nil {
		panic("Cannot add scheme")
	}
	parameterCodec := runtime.NewParameterCodec(scheme)
	req.VersionedParams(&corev1.PodExecOptions{
		Stdin:     false,
		Stdout:    true,
		Stderr:    true,
		TTY:       true,
		Container: containerName, // must be the container name, not the pod name
		Command:   []string{"ls", "-la", "--time-style=iso", "."},
	}, parameterCodec)

	exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL())
	if err != nil {
		panic(err)
	}

	var stdout, stderr bytes.Buffer
	err = exec.Stream(remotecommand.StreamOptions{
		Stdin:  nil,
		Stdout: &stdout,
		Stderr: &stderr,
	})
	if err != nil {
		panic(err)
	}
	text := stdout.String()
	fmt.Println(text)
}
In your case, using kubectl is the same as calling the api-server, which in turn calls the kubelet on the node and execs your command in the pod's namespace.
You can experiment like this:
kubectl proxy --port=8080 &
curl "localhost:8080/api/v1/namespaces/<namespace>/pods/<pod>/exec?command=pwd&stdin=false"
To copy files you can use kubectl cp (see kubectl cp --help).
You can use a tool like Kubernetes Dashboard as the UI, or if you want to go enterprise level, try Rancher.

How to restart a deployment in kubernetes using go-client

Is there a way to restart a Kubernetes deployment using client-go? I have no idea how to achieve this; please help!
If you run kubectl rollout restart deployment/my-deploy -v=10 you can see that kubectl actually sends a PATCH request to the API server and sets .spec.template.metadata.annotations with something like this:
kubectl.kubernetes.io/restartedAt: '2022-11-29T16:33:08+03:30'
So, you can do the same with client-go:
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
	// Do something with err
}
deploymentsClient := clientset.AppsV1().Deployments(namespace)
data := fmt.Sprintf(`{"spec": {"template": {"metadata": {"annotations": {"kubectl.kubernetes.io/restartedAt": "%s"}}}}}`, time.Now().Format("20060102150405"))
deployment, err := deploymentsClient.Patch(ctx, deployment_name, k8stypes.StrategicMergePatchType, []byte(data), v1.PatchOptions{})
if err != nil {
	// Do something with err
}
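The snippet above assumes config, ctx, namespace, and deployment_name are already in scope. For completeness, here is a self-contained sketch of the same trick; the kubeconfig path, namespace, and deployment name are placeholders, and RFC3339 is used for the timestamp to match the annotation format kubectl writes:

package main

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	k8stypes "k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Hypothetical kubeconfig path; adjust to your environment.
	config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	namespace, deploymentName := "default", "my-deploy" // placeholders

	// Same patch kubectl sends: bump the restartedAt annotation on the pod template.
	data := fmt.Sprintf(`{"spec": {"template": {"metadata": {"annotations": {"kubectl.kubernetes.io/restartedAt": "%s"}}}}}`,
		time.Now().Format(time.RFC3339))

	_, err = clientset.AppsV1().Deployments(namespace).Patch(context.TODO(), deploymentName,
		k8stypes.StrategicMergePatchType, []byte(data), metav1.PatchOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("deployment", deploymentName, "restarted")
}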

Issue using in-cluster kubernetes configuration with client-go library on google cloud build

I'm having a bit of a challenge trying to build my app, which uses the golang client-go library. The app provides an API which then deploys a pod to a Kubernetes cluster. The app is able to deploy a pod successfully if I use an out-of-cluster kubernetes config (i.e. minikube), which is found in $HOME/.kube/config. See the code below that determines which config to use depending on the config path:
package kubernetesinterface

import (
	"log"
	"os"

	core "k8s.io/api/core/v1"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	_ "k8s.io/client-go/plugin/pkg/client/auth" // load auth packages
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)

// KubeStruct - struct that uses interface type (useful when testing)
type KubeStruct struct {
	clientset kubernetes.Interface
}

// DeployPod - method that uses a KubeStruct type to deploy a simulator pod to a kubernetes cluster
func (kube *KubeStruct) DeployPod() bool {
	var podObject *core.Pod
	podObject = createPodObjects()
	_, err := kube.clientset.Core().Pods(podObject.Namespace).Create(podObject)
	if err != nil {
		log.Println("Failed to create simulator pod: ", err.Error())
		return false
	}
	return true
}

// GetNewClient - function to create a new clientset object to connect to a kubernetes cluster
func GetNewClient() (*KubeStruct, error) {
	var kubeConfig *rest.Config
	var err error
	configPath := os.Getenv("CONFIG_PATH")
	if configPath == "" {
		log.Println("Using in-cluster configuration")
		kubeConfig, err = rest.InClusterConfig()
	} else {
		log.Println("Using out of cluster config")
		kubeConfig, err = clientcmd.BuildConfigFromFlags("", configPath)
	}
	if err != nil {
		log.Println("Error getting configuration ", err.Error())
		return nil, err
	}
	// create clientset for kubernetes cluster
	client := KubeStruct{}
	client.clientset, err = kubernetes.NewForConfig(kubeConfig)
	if err != nil {
		log.Println("Error creating clientset for kubernetes cluster ", err.Error())
		return nil, err
	}
	return &client, nil
}

func createPodObjects() *core.Pod {
	return &core.Pod{
		ObjectMeta: v1.ObjectMeta{
			Name:      "podname",
			Namespace: "default",
			Labels: map[string]string{
				"app": "podname",
			},
		},
		Spec: core.PodSpec{
			Containers: []core.Container{
				{
					Name:            "podname",
					Image:           os.Getenv("IMAGE"),
					ImagePullPolicy: core.PullIfNotPresent,
					Command: []string{
						"sleep",
						"3600",
					},
				},
			},
		},
	}
}
So if a value exists for CONFIG_PATH, the app works as expected and a pod is deployed to my minikube cluster. But when the same app is built on GCP, I get the following build error:
Step #1: 2019/03/13 21:25:20 Error getting configuration unable to load in-cluster configuration, KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT must be defined
I have searched online unsuccessfully for a solution so I thought I'd post here.