Resource not found error performing SSA create using dynamic client - kubernetes

I was following #ymmt2005's excellent dynamic client guide. All is good until the final step, when I make the actual PATCH call and get a "the server could not find the requested resource" error. Just about everything seems right, except I'm unsure about the 'FieldManager' field in the 'PatchOptions' struct. I'm not sure what "the actor or entity that is making these changes" refers to. Does this need to match something in my code or system? Any other ideas?
package main

import (
    ...
)

const resourceYAML = `
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mike-nginx
spec:
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: 'nginx:latest'
        ports:
        - containerPort: 80
`

func main() {
    ctx := context.Background()

    // Create dynamic discovery client from local kubeconfig file
    kubePath := filepath.Join(homedir.HomeDir(), ".kube", "config")
    cfg, err := clientcmd.BuildConfigFromFlags("", kubePath)
    if err != nil {
        log.Fatalf("error building config, %v\n", err)
    }
    dynClient, err := dynamic.NewForConfig(cfg)
    if err != nil {
        log.Fatalf("error creating client, %v\n", err)
    }
    disClient, err := discovery.NewDiscoveryClientForConfig(cfg)
    if err != nil {
        log.Fatalf("error creating discovery client, %v\n", err)
    }

    // Decode YAML manifest & get GVK
    decodeUnstr := yaml.NewDecodingSerializer(unstructured.UnstructuredJSONScheme)
    obj := &unstructured.Unstructured{}
    _, gvk, err := decodeUnstr.Decode([]byte(resourceYAML), nil, obj)
    if err != nil {
        log.Fatalf("error decoding manifest, %v\n", err)
    }
    jsonObj, err := json.Marshal(obj)
    if err != nil {
        log.Fatalf("error marshaling object, %v\n", err)
    }

    // Find GVR using GVK
    mapper := restmapper.NewDeferredDiscoveryRESTMapper(memory.NewMemCacheClient(disClient))
    mapping, err := mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
    if err != nil {
        log.Fatalf("error finding GVR, %v\n", err)
    }

    // Get REST interface for the GVR, checking for namespace or cluster-wide
    var dr dynamic.ResourceInterface
    if mapping.Scope.Name() == meta.RESTScopeNameNamespace {
        // Namespaced resource
        dr = dynClient.Resource(mapping.Resource).Namespace(obj.GetNamespace())
    } else {
        // Cluster-wide resource
        dr = dynClient.Resource(mapping.Resource)
    }

    // Create or Update the object with SSA
    options := metav1.PatchOptions{FieldManager: "sample-controller"}
    _, err = dr.Patch(ctx, obj.GetName(), types.ApplyPatchType, jsonObj, options)
    if err != nil {
        log.Fatalf("error patching, %v\n", err)
    }
}
[edit] I confirmed that I was only able to use 'Patch' on a resource that already existed. I tweaked the code to use 'Create' to create the resource, and then I was able to successfully do a 'Patch' against it to change it. To overcome the FieldManager inconsistencies I added Force=true to the PatchOptions (as sketched below), which is recommended in the docs anyway. I'd still like to know how I can create the resource if it doesn't exist and update it if it does--maybe just test for existence?
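For reference, Force in metav1.PatchOptions is a *bool, so the tweak looks roughly like this (a minimal sketch of the options change described above, using the field names from apimachinery's metav1.PatchOptions):

    force := true
    options := metav1.PatchOptions{
        FieldManager: "sample-controller",
        Force:        &force, // take ownership of conflicting fields, as recommended for SSA controllers
    }
    _, err = dr.Patch(ctx, obj.GetName(), types.ApplyPatchType, jsonObj, options)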

The answer is really trivial. The original code assumes that the namespace is provided in the manifest. The Deployment endpoint does not automatically set the namespace to default if the provided namespace is "", and errors out because "" is not a valid namespace. Therefore, I added logic to set the namespace to default if it isn't provided and, presto, the server-side apply creates the resource if it doesn't exist and updates it if it does. Thanks again #ymmt2005.
package main

import (
    ...
)

const resourceYAML = `
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mike-nginx
spec:
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: 'nginx:latest'
        ports:
        - containerPort: 80
`

func main() {
    ctx := context.Background()

    // Create dynamic discovery client from local kubeconfig file
    kubePath := filepath.Join(homedir.HomeDir(), ".kube", "config")
    cfg, err := clientcmd.BuildConfigFromFlags("", kubePath)
    if err != nil {
        log.Fatalf("error building config, %v\n", err)
    }
    dynClient, err := dynamic.NewForConfig(cfg)
    if err != nil {
        log.Fatalf("error creating client, %v\n", err)
    }
    disClient, err := discovery.NewDiscoveryClientForConfig(cfg)
    if err != nil {
        log.Fatalf("error creating discovery client, %v\n", err)
    }

    // Decode YAML manifest & get GVK
    decodeUnstr := yaml.NewDecodingSerializer(unstructured.UnstructuredJSONScheme)
    obj := &unstructured.Unstructured{}
    _, gvk, err := decodeUnstr.Decode([]byte(resourceYAML), nil, obj)
    if err != nil {
        log.Fatalf("error decoding manifest, %v\n", err)
    }
    jsonObj, err := json.Marshal(obj)
    if err != nil {
        log.Fatalf("error marshaling object, %v\n", err)
    }

    // Find GVR using GVK
    mapper := restmapper.NewDeferredDiscoveryRESTMapper(memory.NewMemCacheClient(disClient))
    mapping, err := mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
    if err != nil {
        log.Fatalf("error finding GVR, %v\n", err)
    }

    // Set Namespace to default if not provided in manifest
    var ns string
    if ns = obj.GetNamespace(); ns == "" {
        ns = "default"
    }

    // Get REST interface for the GVR, checking for namespace or cluster-wide
    var dr dynamic.ResourceInterface
    if mapping.Scope.Name() == meta.RESTScopeNameNamespace {
        // Namespaced resource
        dr = dynClient.Resource(mapping.Resource).Namespace(ns)
    } else {
        // Cluster-wide resource
        dr = dynClient.Resource(mapping.Resource)
    }

    // Create or Update the object with SSA
    options := metav1.PatchOptions{FieldManager: "sample-controller"}
    _, err = dr.Patch(ctx, obj.GetName(), types.ApplyPatchType, jsonObj, options)
    if err != nil {
        log.Fatalf("error patching, %v\n", err)
    }
}

Related

How to switch kubernetes contexts dynamically with client-go?

I'm building a CLI application that would allow me to run an arbitrary command in my shell against any kube cluster in my kubeconfig that matches a given regex. I want to use the official client-go package to accomplish this, but for some reason, switching kube contexts is less than intuitive. So I'm starting by modifying the example out-of-cluster program in the repo, and I'm struggling with just switching the context to the one I specify. Here is the code I started with, which gets the number of pods in the cluster loaded in the kubeconfig:
package main

import (
    "context"
    "flag"
    "fmt"
    "path/filepath"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/clientcmd"
    "k8s.io/client-go/util/homedir"
)

func main() {
    var kubeconfig *string
    if home := homedir.HomeDir(); home != "" {
        kubeconfig = flag.String("kubeconfig", filepath.Join(home, ".kube", "config"), "(optional) absolute path to the kubeconfig file")
    } else {
        kubeconfig = flag.String("kubeconfig", "", "absolute path to the kubeconfig file")
    }
    flag.Parse()

    // use the current context in kubeconfig
    config, err := clientcmd.BuildConfigFromFlags("", *kubeconfig)
    if err != nil {
        panic(err.Error())
    }

    // create the clientset
    clientset, err := kubernetes.NewForConfig(config)
    if err != nil {
        panic(err.Error())
    }

    pods, err := clientset.CoreV1().Pods("").List(context.TODO(), metav1.ListOptions{})
    if err != nil {
        panic(err.Error())
    }
    fmt.Printf("There are %d pods in the test cluster\n", len(pods.Items))
}
Unfortunately, I cannot figure out how to load a specific cluster by the name defined in my kubeconfig. I would love to have a sort of SwitchContext("cluster-name") function, but the number of Configs, ClientConfigs, RawConfigs, and restclient.Configs is confusing me. Any help would be appreciated!
System: Ubuntu 22.04, Intel, kube server version 1.23.8-gke.1900, client version 1.25.3
You can override the current context via the NewNonInteractiveDeferredLoadingClientConfig method from the clientcmd package.
package main

import (
    "context"
    "flag"
    "fmt"
    "path/filepath"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/clientcmd"
    "k8s.io/client-go/util/homedir"
)

func main() {
    var kubeconfig *string
    if home := homedir.HomeDir(); home != "" {
        kubeconfig = flag.String("kubeconfig", filepath.Join(home, ".kube", "config"), "(optional) absolute path to the kubeconfig file")
    } else {
        kubeconfig = flag.String("kubeconfig", "", "absolute path to the kubeconfig file")
    }
    flag.Parse()

    // use the current context in kubeconfig
    config, err := clientcmd.BuildConfigFromFlags("", *kubeconfig)
    if err != nil {
        panic(err.Error())
    }

    // using `contextName` context in kubeConfig
    contextName := "gce"
    config, err = buildConfigWithContextFromFlags(contextName, *kubeconfig)
    if err != nil {
        panic(err)
    }

    // create the clientset
    clientset, err := kubernetes.NewForConfig(config)
    if err != nil {
        panic(err.Error())
    }

    pods, err := clientset.CoreV1().Pods("").List(context.TODO(), metav1.ListOptions{})
    if err != nil {
        panic(err.Error())
    }
    fmt.Printf("There are %d pods in the test cluster\n", len(pods.Items))
}

func buildConfigWithContextFromFlags(context string, kubeconfigPath string) (*rest.Config, error) {
    return clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
        &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfigPath},
        &clientcmd.ConfigOverrides{
            CurrentContext: context,
        }).ClientConfig()
}
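As a follow-up for the regex-matching part of the question, the context names can be enumerated from the raw kubeconfig with clientcmd.LoadFromFile and then fed into the helper above. A rough sketch (the pattern is a placeholder, and regexp must be imported):

    // Sketch: list kubeconfig contexts whose names match a regex,
    // then build a rest.Config for each match using buildConfigWithContextFromFlags above.
    raw, err := clientcmd.LoadFromFile(*kubeconfig) // *clientcmdapi.Config with the Contexts map
    if err != nil {
        panic(err)
    }
    re := regexp.MustCompile(`gke_.*`) // placeholder pattern
    for name := range raw.Contexts {
        if !re.MatchString(name) {
            continue
        }
        cfg, err := buildConfigWithContextFromFlags(name, *kubeconfig)
        if err != nil {
            panic(err)
        }
        _ = cfg // create a clientset per matching context as needed
    }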

Kubebuilder: resource deletion not having expected side effects

I'm creating a custom resource definition (CRD) with an associated controller using kubebuilder. My controller's reconcile loop creates a Deployment sub-resource and parents it to the custom resource using controllerutil.SetControllerReference(&myResource, deployment, r.Scheme). I've also configured my reconciler to "own" the sub-resource, as follows:
// SetupWithManager sets up the controller with the Manager.
func (r *MyResourceReconciler) SetupWithManager(mgr ctrl.Manager) error {
    return ctrl.NewControllerManagedBy(mgr).
        For(&mygroupv1alpha1.MyResource{}).
        Owns(&appsv1.Deployment{}).
        Complete(r)
}
However, when I run my controller locally using make run, I noticed that deleting my CR (the root object) doesn't cause the Deployment sub-resource to get garbage collected. I also noticed that deleting the Deployment sub-resource doesn't trigger my reconciler to run. Why is this? Is there something I'm not doing, or is this possibly a limitation of local development/testing?
Using #coderanger's hint, I could see that the metadata.ownerReferences weren't being set correctly when running the following command:
kubectl get deployments sample-deployment -o yaml
The problem was my controller's reconcile code. I was calling controllerutil.SetControllerReference(&myResource, deployment, r.Scheme) only after I'd already created and persisted the Deployment.
Buggy code
log.Info("Creating a deployment")
deployment := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: deploymentName,
Namespace: myResource.Namespace,
},
Spec: deploymentSpec,
}
if err = r.Create(ctx, deployment); err != nil {
log.Error(err, "Failed to create deployment")
if errors.IsInvalid(err) {
// Don't retry on validation errors
err = nil
}
return ctrl.Result{}, err
}
// Establish the parent-child relationship between my resource and the deployment
log.Info("Making my resource a parent of the deployment")
if err = controllerutil.SetControllerReference(&myResource, deployment, r.Scheme); err != nil {
log.Error(err, "Failed to set deployment controller reference")
return ctrl.Result{}, err
}
To fix it, I needed to swap the order of the call to r.Create and controllerutil.SetControllerReference:
Working code
log.Info("Creating a deployment")
deployment := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: deploymentName,
Namespace: myResource.Namespace,
},
Spec: deploymentSpec,
}
// Establish the parent-child relationship between my resource and the deployment
log.Info("Making my resource a parent of the deployment")
if err = controllerutil.SetControllerReference(&myResource, deployment, r.Scheme); err != nil {
log.Error(err, "Failed to set deployment controller reference")
return ctrl.Result{}, err
}
// Create the deployment with the parent/child relationship configured
if err = r.Create(ctx, deployment); err != nil {
log.Error(err, "Failed to create deployment")
if errors.IsInvalid(err) {
// Don't retry on validation errors
err = nil
}
return ctrl.Result{}, err
}
I was able to confirm that this worked by looking at the metadata.ownerReferences YAML data for my created deployment (using the command referenced above).
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "1"
  creationTimestamp: "2021-08-02T16:22:04Z"
  generation: 1
  name: sample-deployment
  namespace: default
  ownerReferences:
  - apiVersion: resources.mydomain.io/v1alpha1
    blockOwnerDeletion: true
    controller: true
    kind: MyResource
    name: myresource-sample
    uid: 6ebb146c-afc7-4601-bd75-58efc29beac9
  resourceVersion: "569913"
  uid: d9a4496f-7418-4831-ab87-4804dcd1f8aa

Access to metric-server from a pod

I have deployed the metric-server on my Kubernetes cluster, and it works fine when I run the following command:
kubectl get --raw /apis/metrics.k8s.io/v1beta1/nodes
I want to access the metric-server from a pod. For that, I use the metric-server service's IP; the pod is in the same namespace as the metric-server. The way I'm trying to access the metrics is like this:
myurl := fmt.Sprintf("https://%s:%s/apis/metrics.k8s.io/v1beta1/nodes/", serviceHost, servicePort)
u, err := url.Parse(myurl)
if err != nil {
    panic(err)
}
req, err := http.NewRequest(httpMethod, u.String(), nil)
if err != nil {
    log.Printf("Can't send req: %s", err)
}

caToken, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/token")
if err != nil {
    panic(err) // cannot find token file
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", string(caToken)))

caCertPool := x509.NewCertPool()
caCert, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/ca.crt")
if err != nil {
    panic(err)
}
caCertPool.AppendCertsFromPEM(caCert)

client := &http.Client{
    Transport: &http.Transport{
        TLSClientConfig: &tls.Config{
            RootCAs: caCertPool,
        },
    },
}
resp, err := client.Do(req)
if err != nil {
    log.Printf("sending helm deploy payload failed: %s", err.Error())
    panic(err)
}
This is not working, and the log output for the pod is:
Get https://METRIC-SERVER-SERVICE-IP/apis/metrics.k8s.io/v1beta1/nodes: x509: certificate is valid for 127.0.0.1, not METRIC-SERVER-SERVICE-IP
Is this the right way to access the metric-server from a pod?
What I did so I can access the metrics-server on a local deployment:
set proper RBAC
inside the pod:
export CURL_CA_BUNDLE=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)
curl -H "Authorization: Bearer $TOKEN" -k https://10.100.123.57/apis/metrics.k8s.io/v1beta1/nodes
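From Go, an alternative that sidesteps the certificate/IP mismatch is to go through the API server's aggregation layer with the in-cluster config, i.e. the programmatic equivalent of kubectl get --raw. A minimal sketch, assuming the pod's service account is allowed to read node metrics:

    package main

    import (
        "context"
        "fmt"

        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/rest"
    )

    func main() {
        // In-cluster config uses the mounted service account token and CA bundle.
        cfg, err := rest.InClusterConfig()
        if err != nil {
            panic(err)
        }
        clientset, err := kubernetes.NewForConfig(cfg)
        if err != nil {
            panic(err)
        }
        // Equivalent of `kubectl get --raw /apis/metrics.k8s.io/v1beta1/nodes`;
        // the API server proxies the request to metrics-server.
        body, err := clientset.RESTClient().Get().
            AbsPath("/apis/metrics.k8s.io/v1beta1/nodes").
            DoRaw(context.TODO())
        if err != nil {
            panic(err)
        }
        fmt.Println(string(body))
    }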

Issue using in-cluster kubernetes configuration with client-go library on google cloud build

I'm having a bit of a challenge trying to build my app, which uses the golang client-go library. The app provides an API which then deploys a pod to a Kubernetes cluster. The app is able to deploy a pod successfully if I use an out-of-cluster kubernetes (i.e. minikube) config, which is found in $HOME/.kube/config. See the code below that determines which config to use depending on the config path:
package kubernetesinterface

import (
    "log"
    "os"

    core "k8s.io/api/core/v1"
    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    _ "k8s.io/client-go/plugin/pkg/client/auth" // load auth packages
    "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/clientcmd"
)

// KubeStruct - struct that uses interface type (useful when testing)
type KubeStruct struct {
    clientset kubernetes.Interface
}

// DeployPod - Method that uses a KubeStruct type to deploy a simulator pod to the kubernetes cluster
func (kube *KubeStruct) DeployPod() bool {
    var podObject *core.Pod
    podObject = createPodObjects()
    _, err := kube.clientset.Core().Pods(podObject.Namespace).Create(podObject)
    if err != nil {
        log.Println("Failed to create simulator pod: ", err.Error())
        return false
    }
    return true
}

// GetNewClient - function to create a new clientset object to connect to a kubernetes cluster
func GetNewClient() (*KubeStruct, error) {
    var kubeConfig *rest.Config
    var err error
    configPath := os.Getenv("CONFIG_PATH")
    if configPath == "" {
        log.Println("Using in-cluster configuration")
        kubeConfig, err = rest.InClusterConfig()
    } else {
        log.Println("Using out of cluster config")
        kubeConfig, err = clientcmd.BuildConfigFromFlags("", configPath)
    }
    if err != nil {
        log.Println("Error getting configuration ", err.Error())
        return nil, err
    }
    // create clientset for kubernetes cluster
    client := KubeStruct{}
    client.clientset, err = kubernetes.NewForConfig(kubeConfig)
    if err != nil {
        log.Println("Error creating clientset for kubernetes cluster ", err.Error())
        return nil, err
    }
    return &client, nil
}

func createPodObjects() *core.Pod {
    return &core.Pod{
        ObjectMeta: v1.ObjectMeta{
            Name:      "podname",
            Namespace: "default",
            Labels: map[string]string{
                "app": "podname",
            },
        },
        Spec: core.PodSpec{
            Containers: []core.Container{
                {
                    Name:            "podname",
                    Image:           os.Getenv("IMAGE"),
                    ImagePullPolicy: core.PullIfNotPresent,
                    Command: []string{
                        "sleep",
                        "3600",
                    },
                },
            },
        },
    }
}
So if a value exists for CONFIG_PATH, the app works as expected and a pod is deployed to my minikube cluster. Now when the same app is built on GCP, I get the following build error:
Step #1: 2019/03/13 21:25:20 Error getting configuration unable to load in-cluster configuration, KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT must be defined
I have searched online unsuccessfully for a solution so I thought I'd post here.
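For reference, a common pattern (a sketch only, not specific to the Cloud Build setup above) is to try the in-cluster config first and fall back to a kubeconfig path when the in-cluster environment variables are not set:

    // Sketch: prefer in-cluster config, fall back to a kubeconfig path when
    // KUBERNETES_SERVICE_HOST/PORT are not set (e.g. when running outside a pod).
    func loadConfig(kubeconfigPath string) (*rest.Config, error) {
        if cfg, err := rest.InClusterConfig(); err == nil {
            return cfg, nil
        }
        return clientcmd.BuildConfigFromFlags("", kubeconfigPath)
    }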

How do i create a namespace using kubernetes go-client from running container inside a cluster

I have a Kubernetes cluster and a running container (X). From this container I want to create a new namespace, deploy a pod in this namespace, and spawn container (Y). I know Kubernetes provides REST APIs; however, I am exploring the Go client to do the same and am not sure how to use the namespace creation API.
import (
    "github.com/golang/glog"
    "k8s.io/client-go/kubernetes"
    "k8s.io/kubernetes/pkg/api/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

clientConfig, err := config.createClientConfigFromFile()
if err != nil {
    glog.Fatalf("Failed to create a ClientConfig: %v. Exiting.", err)
}
clientset, err := clientset.NewForConfig(clientConfig)
if err != nil {
    glog.Fatalf("Failed to create a ClientSet: %v. Exiting.", err)
}
nsSpec := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}
_, err := clientset.Core().Namespaces().Create(nsSpec)
This one works for me:
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
    panic(err)
}
nsName := &corev1.Namespace{
    ObjectMeta: metav1.ObjectMeta{
        Name: "my-new-namespace",
    },
}
_, err = clientset.CoreV1().Namespaces().Create(context.Background(), nsName, metav1.CreateOptions{})
if err != nil {
    panic(err)
}
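Since the question is about running from a container inside the cluster, the config passed to kubernetes.NewForConfig can come from rest.InClusterConfig. A minimal, self-contained sketch (the namespace name is an example, and the pod's service account needs RBAC permission to create namespaces):

    package main

    import (
        "context"

        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/rest"
    )

    func main() {
        // Uses the service account token and CA bundle mounted into the pod.
        config, err := rest.InClusterConfig()
        if err != nil {
            panic(err)
        }
        clientset, err := kubernetes.NewForConfig(config)
        if err != nil {
            panic(err)
        }
        ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "my-new-namespace"}}
        if _, err := clientset.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}); err != nil {
            panic(err)
        }
    }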