Kubernetes go client api for log of a particular pod - kubernetes

I am using the Kubernetes Go client with the Kubernetes API to access cluster data. I cannot find an API call for the logs of a particular pod.
kubectl logs pod-name
returns the logs for a particular pod. How do I do this using the Go client?
I am using Kubernetes v1.0.6.
I can get the pod with
client.Pods("namespace").Get("pod-name")

client-go offers a GetLogs function for this; see How to get logs from kubernetes using Go?
Looking at how kubectl implements its commands can be helpful when getting a feel for how to use the client library. In this case, kubectl's implementation of the logs command looks like this:
req := client.RESTClient.Get().
    Namespace(namespace).
    Name(podID).
    Resource("pods").
    SubResource("log").
    Param("follow", strconv.FormatBool(logOptions.Follow)).
    Param("container", logOptions.Container).
    Param("previous", strconv.FormatBool(logOptions.Previous)).
    Param("timestamps", strconv.FormatBool(logOptions.Timestamps))

if logOptions.SinceSeconds != nil {
    req.Param("sinceSeconds", strconv.FormatInt(*logOptions.SinceSeconds, 10))
}
if logOptions.SinceTime != nil {
    req.Param("sinceTime", logOptions.SinceTime.Format(time.RFC3339))
}
if logOptions.LimitBytes != nil {
    req.Param("limitBytes", strconv.FormatInt(*logOptions.LimitBytes, 10))
}
if logOptions.TailLines != nil {
    req.Param("tailLines", strconv.FormatInt(*logOptions.TailLines, 10))
}

readCloser, err := req.Stream()
if err != nil {
    return err
}
defer readCloser.Close()

_, err = io.Copy(out, readCloser)
return err
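With a recent client-go, the GetLogs helper mentioned above does the same job without building the request by hand. Here is a minimal sketch (the clientset, namespace, pod and container names are placeholders; older client-go versions take no context argument in Stream):

import (
    "bytes"
    "context"
    "io"

    corev1 "k8s.io/api/core/v1"
    "k8s.io/client-go/kubernetes"
)

// getPodLogs fetches the logs of one container and returns them as a string.
func getPodLogs(clientset *kubernetes.Clientset, namespace, podName, container string) (string, error) {
    opts := &corev1.PodLogOptions{Container: container}
    req := clientset.CoreV1().Pods(namespace).GetLogs(podName, opts)
    stream, err := req.Stream(context.TODO())
    if err != nil {
        return "", err
    }
    defer stream.Close()
    var buf bytes.Buffer
    if _, err := io.Copy(&buf, stream); err != nil {
        return "", err
    }
    return buf.String(), nil
}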

type Pipe struct {
    InReader  *io.PipeReader
    InWriter  *io.PipeWriter
    OutReader *io.PipeReader
    OutWriter *io.PipeWriter
}

req := client.RESTClient().Get().Resource("pods").
    Name(option.Name).Namespace(option.Namespace).SubResource("log")
opt := &coreV1.PodLogOptions{
    Follow:       option.Follow,
    Previous:     option.Previous,
    SinceSeconds: option.SinceSeconds,
    Timestamps:   option.Timestamps,
    TailLines:    option.TailLines,
    LimitBytes:   option.LimitBytes,
}
if option.Container != "" {
    opt.Container = option.Container
}
req.VersionedParams(
    opt,
    scheme.ParameterCodec,
)
exec, err := remotecommand.NewSPDYExecutor(k.cli.kubeConfig, http.MethodGet, req.URL())
if err != nil {
    return err
}
err = exec.Stream(remotecommand.StreamOptions{
    Stdin:  pipe.InReader,
    Stdout: pipe.OutWriter,
    Stderr: pipe.OutWriter,
    Tty:    true,
})
if err != nil {
    return err
}
return nil

Related

GKE auth using application default credentials

Hi, I am trying to get cluster resource details using Application Default Credentials (ADC).
package main

import (
    "context"
    "encoding/base64"
    "flag"
    "fmt"
    "log"

    "google.golang.org/api/container/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" // register GCP auth provider
    "k8s.io/client-go/tools/clientcmd"
    "k8s.io/client-go/tools/clientcmd/api"
)

var fProjectId = flag.String("projectId", "", "specify a project id to examine")

func main() {
    flag.Parse()
    if *fProjectId == "" {
        log.Fatal("must specify -projectId")
    }
    if err := run(context.Background(), *fProjectId); err != nil {
        log.Fatal(err)
    }
}
func run(ctx context.Context, projectId string) error {
    kubeConfig, err := getK8sClusterConfigs(ctx, projectId)
    if err != nil {
        return err
    }
    // Just list all the namespaces found in the project to test the API.
    for clusterName := range kubeConfig.Clusters {
        cfg, err := clientcmd.NewNonInteractiveClientConfig(*kubeConfig, clusterName, &clientcmd.ConfigOverrides{CurrentContext: clusterName}, nil).ClientConfig()
        if err != nil {
            return fmt.Errorf("failed to create Kubernetes configuration cluster=%s: %w", clusterName, err)
        }
        k8s, err := kubernetes.NewForConfig(cfg)
        if err != nil {
            return fmt.Errorf("failed to create Kubernetes client cluster=%s: %w", clusterName, err)
        }
        ns, err := k8s.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
        if err != nil {
            return fmt.Errorf("failed to list namespaces cluster=%s: %w", clusterName, err)
        }
        log.Printf("Namespaces found in cluster=%s", clusterName)
        for _, item := range ns.Items {
            log.Println(item.Name)
        }
    }
    return nil
}
func getK8sClusterConfigs(ctx context.Context, projectId string) (*api.Config, error) {
    svc, err := container.NewService(ctx)
    if err != nil {
        return nil, fmt.Errorf("container.NewService: %w", err)
    }
    // Basic config structure
    ret := api.Config{
        APIVersion: "v1",
        Kind:       "Config",
        Clusters:   map[string]*api.Cluster{},  // Clusters is a map of referencable names to cluster configs
        AuthInfos:  map[string]*api.AuthInfo{}, // AuthInfos is a map of referencable names to user configs
        Contexts:   map[string]*api.Context{},  // Contexts is a map of referencable names to context configs
    }
    // Ask Google for a list of all kube clusters in the given project.
    resp, err := svc.Projects.Zones.Clusters.List(projectId, "-").Context(ctx).Do()
    if err != nil {
        return nil, fmt.Errorf("clusters list project=%s: %w", projectId, err)
    }
    for _, f := range resp.Clusters {
        name := fmt.Sprintf("gke_%s_%s_%s", projectId, f.Zone, f.Name)
        cert, err := base64.StdEncoding.DecodeString(f.MasterAuth.ClusterCaCertificate)
        if err != nil {
            return nil, fmt.Errorf("invalid certificate cluster=%s cert=%s: %w", name, f.MasterAuth.ClusterCaCertificate, err)
        }
        // example: gke_my-project_us-central1-b_cluster-1 => https://XX.XX.XX.XX
        ret.Clusters[name] = &api.Cluster{
            CertificateAuthorityData: cert,
            Server:                   "https://" + f.Endpoint,
        }
        // Just reuse the context name as an auth name.
        ret.Contexts[name] = &api.Context{
            Cluster:  name,
            AuthInfo: name,
        }
        // GCP-specific configuration; use the cloud platform scope.
        ret.AuthInfos[name] = &api.AuthInfo{
            AuthProvider: &api.AuthProviderConfig{
                Name: "gcp",
                Config: map[string]string{
                    "scopes": "https://www.googleapis.com/auth/cloud-platform",
                },
            },
        }
    }
    return &ret, nil
}
It is giving me this error:
go run main.go -projectId=<Project-id>
2023/01/23 12:13:47 failed to create Kubernetes client cluster=<cluster-name>: The gcp auth plugin has been removed.
Please use the "gke-gcloud-auth-plugin" kubectl/client-go credential plugin instead.
See https://cloud.google.com/blog/products/containers-kubernetes/kubectl-auth-changes-in-gke for further details
exit status 1
It would be helpful if you could suggest how to solve this issue.
Since Kubernetes 1.26 you need to install the GKE auth plugin separately. Depending on your operating system, install google-cloud-sdk-gke-gcloud-auth-plugin and it should work. It's described well here:
https://cloud.google.com/blog/products/containers-kubernetes/kubectl-auth-changes-in-gke
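If you would rather keep the programmatically generated kubeconfig from the question instead of downgrading, one option is to emit an exec credential plugin entry in place of the removed gcp auth provider. This is only a sketch and assumes gke-gcloud-auth-plugin is installed and on the PATH:

// inside the loop in getK8sClusterConfigs, instead of the AuthProvider block
ret.AuthInfos[name] = &api.AuthInfo{
    Exec: &api.ExecConfig{
        APIVersion:         "client.authentication.k8s.io/v1beta1",
        Command:            "gke-gcloud-auth-plugin",
        InstallHint:        "install gke-gcloud-auth-plugin via gcloud components or your package manager",
        ProvideClusterInfo: true,
        InteractiveMode:    api.IfAvailableExecInteractiveMode,
    },
}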
After downgrading the dependencies it works fine:
go get k8s.io/client-go/tools/clientcmd@v0.25.5
go get k8s.io/cloud-provider-gcp/pkg/clientauthplugin/gcp@bb1acae5826dc877953d48

How to use a context other than the current context in client-go to access multiple clusters? [duplicate]

How can I use a specific kubeconfig context to configure the Kubernetes client-go?
package kube

import (
    "fmt"

    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/clientcmd"
)

// GetKubeClient creates a Kubernetes config and client for a given kubeconfig context.
func GetKubeClient(context string) (*rest.Config, kubernetes.Interface, error) {
    config, err := configForContext(context)
    if err != nil {
        return nil, nil, err
    }
    client, err := kubernetes.NewForConfig(config)
    if err != nil {
        return nil, nil, fmt.Errorf("could not get Kubernetes client: %s", err)
    }
    return config, client, nil
}

// configForContext creates a Kubernetes REST client configuration for a given kubeconfig context.
func configForContext(context string) (*rest.Config, error) {
    config, err := getConfig(context).ClientConfig()
    if err != nil {
        return nil, fmt.Errorf("could not get Kubernetes config for context %q: %s", context, err)
    }
    return config, nil
}

// getConfig returns a Kubernetes client config for a given context.
func getConfig(context string) clientcmd.ClientConfig {
    rules := clientcmd.NewDefaultClientConfigLoadingRules()
    rules.DefaultClientConfig = &clientcmd.DefaultClientConfig
    overrides := &clientcmd.ConfigOverrides{ClusterDefaults: clientcmd.ClusterDefaults}
    if context != "" {
        overrides.CurrentContext = context
    }
    return clientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, overrides)
}
If I try this code (taken from Helm), the API server is not set correctly and the client tries to connect to the default host localhost:8080.
Found the problem. The implementation of github.com/imdario/mergo changed in a newer version and breaks the behavior of generating the client config. So use only revision 6633656539c1639d9d78127b7d47c622b5d7b6dc, as the official kubernetes client-go repository does.
https://github.com/kubernetes/client-go/issues/415
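If the project uses Go modules, one way to pin that revision (a sketch; adapt it to whatever dependency tool the project actually uses) is:

go get github.com/imdario/mergo@6633656539c1639d9d78127b7d47c622b5d7b6dc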
Currently the example recommends doing something like this:
kconf, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
if err != nil {
    return nil, err
}
However that won't allow you to specify the context you want to use. If you look at the source code you'll see that BuildConfigFromFlags is a thin wrapper around NewNonInteractiveDeferredLoadingClientConfig.
If you use NewNonInteractiveDeferredLoadingClientConfig instead you can specify the context like this:
configLoadingRules := &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfig}
configOverrides := &clientcmd.ConfigOverrides{CurrentContext: "dev-cluster"}

kconf, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(configLoadingRules, configOverrides).ClientConfig()
if err != nil {
    return nil, err
}

Run helm3 client from in-cluster

Helm 3 does not provide any way to create an action.Configuration structure if the code is running from within the cluster.
I have tried building my own generic flags:
config, err := rest.InClusterConfig()
if err != nil {
    panic(err)
}
insecure := false
genericConfigFlag := &genericclioptions.ConfigFlags{
    Timeout:          stringptr("0"),
    Insecure:         &insecure,
    APIServer:        stringptr(config.Host),
    CAFile:           stringptr(config.CAFile),
    BearerToken:      stringptr(config.BearerToken),
    ImpersonateGroup: &[]string{},
    Namespace:        stringptr(namespace),
}
actionConfig := &action.Configuration{
    RESTClientGetter: genericConfigFlag,
    KubeClient:       kube.New(genericConfigFlag),
    Log:              log.Infof,
}
Unfortunately, this results in a SIGSEGV error later when running action.NewList(actionConfig).Run().
Is it the right way to define an action config for Helm 3 from within a Kubernetes cluster?
This is what I did, and it works fine for me (using Helm 3.2.0-level SDK code):
imports
import (
    "log"
    "os"

    "helm.sh/helm/v3/pkg/action"
    "k8s.io/cli-runtime/pkg/genericclioptions"
    "k8s.io/client-go/rest"
)
ActionConfig
func getActionConfig(namespace string) (*action.Configuration, error) {
    actionConfig := new(action.Configuration)
    var kubeConfig *genericclioptions.ConfigFlags
    // Create the rest config instance with ServiceAccount values loaded into it
    config, err := rest.InClusterConfig()
    if err != nil {
        return nil, err
    }
    // Create the ConfigFlags struct instance with initialized values from the ServiceAccount
    kubeConfig = genericclioptions.NewConfigFlags(false)
    kubeConfig.APIServer = &config.Host
    kubeConfig.BearerToken = &config.BearerToken
    kubeConfig.CAFile = &config.CAFile
    kubeConfig.Namespace = &namespace
    if err := actionConfig.Init(kubeConfig, namespace, os.Getenv("HELM_DRIVER"), log.Printf); err != nil {
        return nil, err
    }
    return actionConfig, nil
}
Usage
actionConfig, err := getActionConfig(namespace)
if err != nil {
    log.Println(err)
}
listAction := action.NewList(actionConfig)
releases, err := listAction.Run()
if err != nil {
    log.Println(err)
}
for _, release := range releases {
    log.Println("Release: " + release.Name + " Status: " + release.Info.Status.String())
}
It is not much different from what you originally did, except for the initialization of the actionConfig. It could also be that a newer version fixed some issues. Let me know if this works for you.
To run helm 3 in-cluster you need to modify the source code.
Here is the function:
func (c *Configuration) KubernetesClientSet() (kubernetes.Interface, error) {
    conf, err := c.RESTClientGetter.ToRESTConfig()
    if err != nil {
        return nil, errors.Wrap(err, "unable to generate config for kubernetes client")
    }
    return kubernetes.NewForConfig(conf)
}
Change the line conf, err := c.RESTClientGetter.ToRESTConfig() to conf, err := rest.InClusterConfig(), and compile the code.
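Applied to the function above, the patched version would look roughly like this (a sketch of the suggested edit, not upstream Helm code):

func (c *Configuration) KubernetesClientSet() (kubernetes.Interface, error) {
    // use the in-cluster ServiceAccount configuration instead of the RESTClientGetter
    conf, err := rest.InClusterConfig()
    if err != nil {
        return nil, errors.Wrap(err, "unable to generate config for kubernetes client")
    }
    return kubernetes.NewForConfig(conf)
}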
You can also try modifying the code so that the resulting binary is universal and can run out of cluster as well as in-cluster; a sketch of that pattern follows below.
Let me know if it's helpful and if it solves your problem.
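For the universal variant, a common pattern (again only a sketch, not Helm code) is to try the in-cluster configuration first and fall back to the local kubeconfig file:

// loadConfig prefers the in-cluster ServiceAccount config and falls back to ~/.kube/config.
func loadConfig() (*rest.Config, error) {
    if conf, err := rest.InClusterConfig(); err == nil {
        return conf, nil
    }
    return clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
}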

In which part of the codes, the scheduler gets pod and node information from the etcd and API server in kubernetes?

Based on the following GitHub repository:
https://github.com/kubernetes/kubernetes
You need to refer to this file:
https://github.com/kubernetes/kubernetes/blob/master/pkg/scheduler/core/generic_scheduler.go
The code snippet below retrieves all available nodes and identifies the node that is best suited for running a specific pod:
nodes, err := nodeLister.List()
if err != nil {
    return result, err
}
if len(nodes) == 0 {
    return result, ErrNoNodesAvailable
}
if err := g.snapshot(); err != nil {
    return result, err
}

trace.Step("Computing predicates")
startPredicateEvalTime := time.Now()
filteredNodes, failedPredicateMap, err := g.findNodesThatFit(pod, nodes)

how to implement `kubectl create -f pod.yaml` using kubernetes go client library

I have a folder named "myspec" which contains some Kubernetes spec files, say
pod.yaml, service.yaml, secret.yaml
When I run the command "kubectl create -f myspec" it creates everything: the pod, the service and the secret.
Now I want to do the same thing using the Kubernetes Go client library.
I believe the previous poster meant to post this:
1) You first convert the string to bytes.
2) Then unmarshal it into a Pod.
3) Then create the Pod like any other object.
This can be done, without loss of generality, for Pods, Services, ReplicationControllers, Deployments, ConfigMaps, Secrets, and any other Kubernetes API object.
Example:
func CreatePodFromYaml(podAsYaml string, ns string) error {
    var p v1.Pod
    err := json.Unmarshal([]byte(podAsYaml), &p)
    if err != nil {
        return err
    }
    pod, poderr := kubernetesConfig().CoreV1().Pods(ns).Create(&p)
    if poderr != nil {
        return poderr
    }
    fmt.Printf("Created Pod %q.\n", pod.GetObjectMeta().GetName())
    return nil
}
To compile this code, you'll also need to define the kubernetesConfig helper:
func kubernetesConfig() *kubernetes.Clientset {
    // note: $HOME is not expanded inside a Go string literal, so build the path explicitly
    config, err := clientcmd.BuildConfigFromFlags("", os.Getenv("HOME")+"/.kube/config")
    if err != nil {
        fmt.Println(err.Error())
    }
    clientset, err := kubernetes.NewForConfig(config)
    if err != nil {
        fmt.Println(err.Error())
    }
    return clientset
}
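The snippet above expects JSON input; since kubectl create -f usually consumes YAML, one possible variation (a sketch using client-go's universal deserializer, not part of the answer above) is to decode the manifest into a typed object first:

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    "k8s.io/client-go/kubernetes/scheme"
)

// decodePodFromYaml decodes a YAML (or JSON) manifest into a typed Pod object.
func decodePodFromYaml(manifest []byte) (*v1.Pod, error) {
    obj, _, err := scheme.Codecs.UniversalDeserializer().Decode(manifest, nil, nil)
    if err != nil {
        return nil, err
    }
    pod, ok := obj.(*v1.Pod)
    if !ok {
        return nil, fmt.Errorf("manifest is not a Pod")
    }
    return pod, nil
}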
I am able to create the pod using the Kubernetes Go client library.
I pass the JSON file and the namespace name to the function, which unmarshals it into a v1.Pod object and calls the Create function as below:
func createPod(b []byte, ns string) {
    var p v1.Pod
    err := json.Unmarshal(b, &p)
    if err != nil {
        fmt.Println(err.Error())
        return
    }
    pod, poderr := kubernetesConfig().CoreV1().Pods(ns).Create(&p)
    if poderr != nil {
        fmt.Println(poderr.Error())
    } else {
        fmt.Printf("Created Pod %q.\n", pod.GetObjectMeta().GetName())
    }
}
func kubernetesConfig() *kubernetes.Clientset {
    // note: $HOME is not expanded inside a Go string literal, so build the path explicitly
    config, err := clientcmd.BuildConfigFromFlags("", os.Getenv("HOME")+"/.kube/config")
    if err != nil {
        fmt.Println(err.Error())
    }
    clientset, err := kubernetes.NewForConfig(config)
    if err != nil {
        fmt.Println(err.Error())
    }
    return clientset
}