I am trying to add a new key-value pair to the existing set of annotations on a running Pod using the example code below:
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/klog"
)
const (
configPath = "/home/test/.kube/config"
)
func main() {
client, _ := connect()
pod, _ := client.CoreV1().Pods("default").Get(context.TODO(), "nginx-pod", metav1.GetOptions{})
fmt.Println(pod.Name)
annotations := map[string]string{
"foo":"bar",
}
pod.SetAnnotations(annotations)
for name, value := range pod.GetAnnotations() {
fmt.Println("name := ", name, "value =", value)
}
}
func connect() (*kubernetes.Clientset, error) {
restconfig, err := clientcmd.BuildConfigFromFlags("", configPath)
if err != nil {
klog.Exit(err.Error())
}
clientset, err := kubernetes.NewForConfig(restconfig)
if err != nil {
klog.Exit(err.Error())
}
return clientset, nil
}
When I run the code above and then run "oc describe pods/nginx-pod", I don't see the annotation "foo: bar" under Annotations.
What's the right way to add new annotations to an existing Pod?
You're going to want something along the lines of:
...
pod.SetAnnotations(annotations)
client.
CoreV1().
Pods("default").
Update(context.TODO(), pod, metav1.UpdateOptions{})
See: PodInterface
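For completeness, here is a minimal sketch (reusing the pod name, namespace, and key/value from the question) that merges the new key into the existing annotations instead of replacing them wholesale, and then persists the change with Update:
pod, err := client.CoreV1().Pods("default").Get(context.TODO(), "nginx-pod", metav1.GetOptions{})
if err != nil {
	klog.Exit(err.Error())
}

// Merge into whatever annotations are already set instead of overwriting them.
annotations := pod.GetAnnotations()
if annotations == nil {
	annotations = map[string]string{}
}
annotations["foo"] = "bar"
pod.SetAnnotations(annotations)

// Persist the change; without this call the annotation only exists on the local copy of the object.
if _, err := client.CoreV1().Pods("default").Update(context.TODO(), pod, metav1.UpdateOptions{}); err != nil {
	klog.Exit(err.Error())
}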
Hi, I am trying to get cluster resource details using Application Default Credentials (ADC).
package main
import (
"context"
"encoding/base64"
"flag"
"fmt"
"log"
"google.golang.org/api/container/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp" // register GCP auth provider
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/clientcmd/api"
)
var fProjectId = flag.String("projectId", "", "specify a project id to examine")
func main() {
flag.Parse()
if *fProjectId == "" {
log.Fatal("must specific -projectId")
}
if err := run(context.Background(), *fProjectId); err != nil {
log.Fatal(err)
}
}
func run(ctx context.Context, projectId string) error {
kubeConfig, err := getK8sClusterConfigs(ctx, projectId)
if err != nil {
return err
}
// Just list all the namespaces found in the project to test the API.
for clusterName := range kubeConfig.Clusters {
cfg, err := clientcmd.NewNonInteractiveClientConfig(*kubeConfig, clusterName, &clientcmd.ConfigOverrides{CurrentContext: clusterName}, nil).ClientConfig()
if err != nil {
return fmt.Errorf("failed to create Kubernetes configuration cluster=%s: %w", clusterName, err)
}
k8s, err := kubernetes.NewForConfig(cfg)
if err != nil {
return fmt.Errorf("failed to create Kubernetes client cluster=%s: %w", clusterName, err)
}
ns, err := k8s.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
if err != nil {
return fmt.Errorf("failed to list namespaces cluster=%s: %w", clusterName, err)
}
log.Printf("Namespaces found in cluster=%s", clusterName)
for _, item := range ns.Items {
log.Println(item.Name)
}
}
return nil
}
func getK8sClusterConfigs(ctx context.Context, projectId string) (*api.Config, error) {
svc, err := container.NewService(ctx)
if err != nil {
return nil, fmt.Errorf("container.NewService: %w", err)
}
// Basic config structure
ret := api.Config{
APIVersion: "v1",
Kind: "Config",
Clusters: map[string]*api.Cluster{}, // Clusters is a map of referencable names to cluster configs
AuthInfos: map[string]*api.AuthInfo{}, // AuthInfos is a map of referencable names to user configs
Contexts: map[string]*api.Context{}, // Contexts is a map of referencable names to context configs
}
// Ask Google for a list of all kube clusters in the given project.
resp, err := svc.Projects.Zones.Clusters.List(projectId, "-").Context(ctx).Do()
if err != nil {
return nil, fmt.Errorf("clusters list project=%s: %w", projectId, err)
}
for _, f := range resp.Clusters {
name := fmt.Sprintf("gke_%s_%s_%s", projectId, f.Zone, f.Name)
cert, err := base64.StdEncoding.DecodeString(f.MasterAuth.ClusterCaCertificate)
if err != nil {
return nil, fmt.Errorf("invalid certificate cluster=%s cert=%s: %w", name, f.MasterAuth.ClusterCaCertificate, err)
}
// example: gke_my-project_us-central1-b_cluster-1 => https://XX.XX.XX.XX
ret.Clusters[name] = &api.Cluster{
CertificateAuthorityData: cert,
Server: "https://" + f.Endpoint,
}
// Just reuse the context name as an auth name.
ret.Contexts[name] = &api.Context{
Cluster: name,
AuthInfo: name,
}
// GCP-specific configuration; use cloud platform scope.
ret.AuthInfos[name] = &api.AuthInfo{
AuthProvider: &api.AuthProviderConfig{
Name: "gcp",
Config: map[string]string{
"scopes": "https://www.googleapis.com/auth/cloud-platform",
},
},
}
}
return &ret, nil
}
It is giving me the error:
go run main.go -projectId=<Project-id>
2023/01/23 12:13:47 failed to create Kubernetes client cluster=<cluster-name>: The gcp auth plugin has been removed.
Please use the "gke-gcloud-auth-plugin" kubectl/client-go credential plugin instead.
See https://cloud.google.com/blog/products/containers-kubernetes/kubectl-auth-changes-in-gke for further details
exit status 1
It would be helpful if you could suggest how to solve this issue.
Since Kubernetes 1.26 you need to install the GKE auth plugin separately. So, depending on your operating system, install google-cloud-sdk-gke-gcloud-auth-plugin and it should work. It's described pretty well here:
https://cloud.google.com/blog/products/containers-kubernetes/kubectl-auth-changes-in-gke
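If you prefer to keep building the kubeconfig in code as above, another option is to swap the removed gcp auth provider for an exec credential plugin entry. A minimal sketch, assuming the gke-gcloud-auth-plugin binary is installed and on your PATH; it replaces the AuthInfos assignment in getK8sClusterConfigs:
ret.AuthInfos[name] = &api.AuthInfo{
	// Shell out to the external plugin instead of the removed in-tree gcp provider.
	Exec: &api.ExecConfig{
		APIVersion:         "client.authentication.k8s.io/v1beta1",
		Command:            "gke-gcloud-auth-plugin",
		InstallHint:        "install gke-gcloud-auth-plugin (e.g. via gcloud components or your OS package manager)",
		ProvideClusterInfo: true,
		InteractiveMode:    api.IfAvailableExecInteractiveMode,
	},
}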
After downgrading the dependencies it is working fine:
go get k8s.io/client-go/tools/clientcmd@v0.25.5
go get k8s.io/cloud-provider-gcp/pkg/clientauthplugin/gcp@bb1acae5826dc877953d48
How can I use a specific kubeconfig context to configure the Kubernetes client-go?
package kube
import (
"fmt"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
// GetKubeClient creates a Kubernetes config and client for a given kubeconfig context.
func GetKubeClient(context string) (*rest.Config, kubernetes.Interface, error) {
config, err := configForContext(context)
if err != nil {
return nil, nil, err
}
client, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, nil, fmt.Errorf("could not get Kubernetes client: %s", err)
}
return config, client, nil
}
// configForContext creates a Kubernetes REST client configuration for a given kubeconfig context.
func configForContext(context string) (*rest.Config, error) {
config, err := getConfig(context).ClientConfig()
if err != nil {
return nil, fmt.Errorf("could not get Kubernetes config for context %q: %s", context, err)
}
return config, nil
}
// getConfig returns a Kubernetes client config for a given context.
func getConfig(context string) clientcmd.ClientConfig {
rules := clientcmd.NewDefaultClientConfigLoadingRules()
rules.DefaultClientConfig = &clientcmd.DefaultClientConfig
overrides := &clientcmd.ConfigOverrides{ClusterDefaults: clientcmd.ClusterDefaults}
if context != "" {
overrides.CurrentContext = context
}
return clientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, overrides)
}
If I try this code (taken from Helm), the API server is not set correctly and the client tries to connect to the default host localhost:8080.
Found the problem. The implementation of github.com/imdario/mergo changed in a newer version and breaks the behavior of generating the client config. So pin to revision 6633656539c1639d9d78127b7d47c622b5d7b6dc, as in the official kubernetes client-go repository.
https://github.com/kubernetes/client-go/issues/415
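If the project uses Go modules, a rough equivalent of pinning that revision (an assumption on my part, not something from the linked issue) is:
go get github.com/imdario/mergo@6633656539c1639d9d78127b7d47c622b5d7b6dc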
Currently the example recommends doing something like this:
kconf, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
if err != nil {
return nil, err
}
However, that won't allow you to specify the context you want to use. If you look at the source code, you'll see that BuildConfigFromFlags is a thin wrapper around NewNonInteractiveDeferredLoadingClientConfig.
If you use NewNonInteractiveDeferredLoadingClientConfig instead you can specify the context like this:
configLoadingRules := &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfig}
configOverrides := &clientcmd.ConfigOverrides{CurrentContext: "dev-cluster"}
kconf, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(configLoadingRules, configOverrides).ClientConfig()
if err != nil {
return nil, err
}
Helm 3 does not provide any way to create an action.Configuration structure if the code is running from within the cluster.
I have tried building my own generic flags:
config, err := rest.InClusterConfig()
if err != nil {
panic(err)
}
insecure := false
genericConfigFlag := &genericclioptions.ConfigFlags{
Timeout: stringptr("0"),
Insecure: &insecure,
APIServer: stringptr(config.Host),
CAFile: stringptr(config.CAFile),
BearerToken: stringptr(config.BearerToken),
ImpersonateGroup: &[]string{},
Namespace: stringptr(namespace),
}
actionConfig := &action.Configuration{
RESTClientGetter: genericConfigFlag,
KubeClient: kube.New(genericConfigFlag),
Log: log.Infof,
}
Unfortunately, this results in a SIGSEGV error later when running action.NewList(actionConfig).Run().
Is it the right way to define an action config for Helm 3 from within a Kubernetes cluster?
This is what I did, and it works fine for me (using Helm 3.2.0-level SDK code):
imports
import (
	"log"
	"os"

	"helm.sh/helm/v3/pkg/action"
	"k8s.io/cli-runtime/pkg/genericclioptions"
	"k8s.io/client-go/rest"
)
ActionConfig
func getActionConfig(namespace string) (*action.Configuration, error) {
actionConfig := new(action.Configuration)
var kubeConfig *genericclioptions.ConfigFlags
// Create the rest config instance with ServiceAccount values loaded in them
config, err := rest.InClusterConfig()
if err != nil {
return nil, err
}
// Create the ConfigFlags struct instance with initialized values from ServiceAccount
kubeConfig = genericclioptions.NewConfigFlags(false)
kubeConfig.APIServer = &config.Host
kubeConfig.BearerToken = &config.BearerToken
kubeConfig.CAFile = &config.CAFile
kubeConfig.Namespace = &namespace
if err := actionConfig.Init(kubeConfig, namespace, os.Getenv("HELM_DRIVER"), log.Printf); err != nil {
return nil, err
}
return actionConfig, nil
}
Usage
actionConfig, err := getActionConfig(namespace)
listAction := action.NewList(actionConfig)
releases, err := listAction.Run()
if err != nil {
log.Println(err)
}
for _, release := range releases {
log.Println("Release: " + release.Name + " Status: " + release.Info.Status.String())
}
It is not much different from what you originally did, except for the initialization of the actionConfig. It could also be that a newer version fixed some issues. Let me know if this works for you.
To run Helm 3 in-cluster you need to modify the source code.
Here is the function:
func (c *Configuration) KubernetesClientSet() (kubernetes.Interface, error) {
conf, err := c.RESTClientGetter.ToRESTConfig()
if err != nil {
return nil, errors.Wrap(err, "unable to generate config for kubernetes client")
}
return kubernetes.NewForConfig(conf)
}
Change this line:
conf, err := c.RESTClientGetter.ToRESTConfig()
to:
conf, err := rest.InClusterConfig()
and compile the code.
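Put together, a sketch of the patched function, assuming k8s.io/client-go/rest is imported in that file:
func (c *Configuration) KubernetesClientSet() (kubernetes.Interface, error) {
	// Build the client from the ServiceAccount mounted into the pod
	// instead of going through the RESTClientGetter.
	conf, err := rest.InClusterConfig()
	if err != nil {
		return nil, errors.Wrap(err, "unable to load in-cluster config for kubernetes client")
	}
	return kubernetes.NewForConfig(conf)
}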
You can also try modifying the code so that the resulting binary is universal and can run out-of-cluster as well as in-cluster, as in the sketch below.
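A minimal sketch of that universal approach (loadConfig is a hypothetical helper; it assumes k8s.io/client-go/rest and k8s.io/client-go/tools/clientcmd are imported and that a kubeconfig path is passed in for the out-of-cluster case):
func loadConfig(kubeconfig string) (*rest.Config, error) {
	// Prefer the ServiceAccount credentials when running inside a cluster.
	if conf, err := rest.InClusterConfig(); err == nil {
		return conf, nil
	}
	// Fall back to a kubeconfig file when running outside the cluster.
	return clientcmd.BuildConfigFromFlags("", kubeconfig)
}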
Let me know if it's helpful and if it solves your problem.
I have a folder named "myspec" which contains some kube spec files, let's say
pod.yaml, service.yaml, secret.yaml
When I run the command "kubectl create -f myspec" it creates everything: the pod, the service and the secret.
Now I wish to do the same thing using the Kubernetes Go client library.
I believe the previous poster meant to post this:
1) First convert the string to bytes.
2) Then deserialize it into a Pod.
3) Then create the Pod like any other object.
This can be done, without loss of generality, for Pods, Services, ReplicationControllers, Deployments, ConfigMaps, Secrets, and any other kubernetes API object.
example
func CreatePodFromYaml(podAsYaml string, ns string) error {
var p v1.Pod
err := json.Unmarshal([]byte(podAsYaml), &p)
if err != nil {
return err
}
pod, poderr := kubernetesConfig().CoreV1().Pods(ns).Create(&p)
if poderr != nil {
return poderr
}
fmt.Printf("Created Pod %q.\n", pod.GetObjectMeta().GetName())
return nil
}
To compile this code, you'll also need the kubernetesConfig helper:
func kubernetesConfig() *kubernetes.Clientset {
config, err := clientcmd.BuildConfigFromFlags("", "/$HOME/.kube/config")
if err != nil {
fmt.Println(err.Error())
}
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
fmt.Println(err.Error())
}
return clientset
}
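If the manifests are plain YAML rather than JSON, one option is to decode them with the scheme's universal deserializer, which understands both formats. A minimal sketch against a recent client-go (where Create takes a context and options); CreatePodFromManifest is a hypothetical name and kubernetesConfig is the helper above:
import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/scheme"
)

func CreatePodFromManifest(manifest []byte, ns string) error {
	// Decode YAML or JSON into a typed object registered in the client-go scheme.
	obj, _, err := scheme.Codecs.UniversalDeserializer().Decode(manifest, nil, nil)
	if err != nil {
		return err
	}
	pod, ok := obj.(*corev1.Pod)
	if !ok {
		return fmt.Errorf("manifest does not describe a Pod")
	}
	created, err := kubernetesConfig().CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
	if err != nil {
		return err
	}
	fmt.Printf("Created Pod %q.\n", created.GetName())
	return nil
}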
I am able to create the pod using the Kubernetes Go client library. I am passing a JSON file and a namespace name to the function; it unmarshals into a v1.Pod object and calls the create function as below:
func createPod(b []byte, ns string) {
	var p v1.Pod
	err := json.Unmarshal(b, &p)
	if err != nil {
		fmt.Println(err.Error())
		return
	}
	pod, poderr := kubernetesConfig().CoreV1().Pods(ns).Create(&p)
	if poderr != nil {
		fmt.Println(poderr.Error())
	} else {
		fmt.Printf("Created Pod %q.\n", pod.GetObjectMeta().GetName())
	}
}
func kubernetesConfig() *kubernetes.Clientset {
config, err := clientcmd.BuildConfigFromFlags("", "/$HOME/.kube/config")
if err != nil {
fmt.Println(err.Error())
}
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
fmt.Println(err.Error())
}
return clientset
}
I am using the DescribeInstances API to fetch all my EC2 instances, but I am not able to get any information about my instances; I can only see empty results reported as success. I am providing my code in the steps-to-reproduce section below.
The output I see is shown below:
Success {
}
I am also exporting AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and AWS_REGION.
Below is the code snippet:
package main

import (
"fmt"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2"
)
func main() {
// Load session from shared config
sess, err := session.NewSession()
if err != nil {
	fmt.Println("Error creating session", err)
	return
}
// Create new EC2 client
ec2Svc := ec2.New(sess)
resp, err := ec2Svc.DescribeInstances(nil)
if err != nil {
fmt.Println("Error", err)
} else {
fmt.Println("Success", resp)
for idx, res := range resp.Reservations {
fmt.Println(" > Number of instances: ", len(res.Instances))
for _, inst := range resp.Reservations[idx].Instances {
fmt.Println(" - Instance ID: ", *inst.InstanceId)
}
}
}
}