I want to create a service on kubernetes which manages helm charts on the cluster. It installs charts from a private chart repository. Since I didn't find any documents on how to use helm client api, I was looking for some samples or guidelines for creating a service on top of helm client.
FOR HELM3
As other answers have pointed out, with Helm 2 you need to talk to Tiller, which complicates things.
It is much cleaner with Helm 3, since Tiller was removed and the Helm client talks directly to the Kubernetes API server.
Here is an example of installing a helm chart programmatically with Helm 3:
package main

import (
    "fmt"
    "os"

    "helm.sh/helm/v3/pkg/action"
    "helm.sh/helm/v3/pkg/chart/loader"
    "helm.sh/helm/v3/pkg/kube"
    _ "k8s.io/client-go/plugin/pkg/client/auth"
)

func main() {
    chartPath := "/tmp/my-chart-0.1.0.tgz"
    chart, err := loader.Load(chartPath)
    if err != nil {
        panic(err)
    }

    kubeconfigPath := "/tmp/my-kubeconfig"
    releaseName := "my-release"
    releaseNamespace := "default"

    actionConfig := new(action.Configuration)
    // The last argument is Helm's debug log function; print the message instead of discarding it.
    if err := actionConfig.Init(kube.GetConfig(kubeconfigPath, "", releaseNamespace), releaseNamespace, os.Getenv("HELM_DRIVER"), func(format string, v ...interface{}) {
        fmt.Printf(format+"\n", v...)
    }); err != nil {
        panic(err)
    }

    iCli := action.NewInstall(actionConfig)
    iCli.Namespace = releaseNamespace
    iCli.ReleaseName = releaseName

    rel, err := iCli.Run(chart, nil)
    if err != nil {
        panic(err)
    }
    fmt.Println("Successfully installed release:", rel.Name)
}
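Since the question is about a service that manages charts rather than just installing them, the same action.Configuration can drive the other Helm operations as well. Below is a minimal sketch of listing and uninstalling releases, assuming the actionConfig initialised above (method names per helm.sh/helm/v3/pkg/action; error handling kept minimal):

    // List currently deployed releases with the same actionConfig.
    listCli := action.NewList(actionConfig)
    releases, err := listCli.Run()
    if err != nil {
        panic(err)
    }
    for _, r := range releases {
        fmt.Println(r.Name, r.Namespace, r.Chart.Metadata.Version)
    }

    // Uninstall a release by name.
    unCli := action.NewUninstall(actionConfig)
    res, err := unCli.Run("my-release")
    if err != nil {
        panic(err)
    }
    fmt.Println("Uninstalled release:", res.Release.Name)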
Since it took me some time to get this working with Helm 2, here is a minimal example (no error handling, details of building the kube config left out, ...) for listing release names:
package main

import (
    "fmt"

    "k8s.io/client-go/kubernetes"
    "k8s.io/helm/pkg/helm"
    "k8s.io/helm/pkg/helm/portforwarder"
)

func main() {
    // building kubeConfig (*rest.Config) is omitted, see: https://github.com/kubernetes/client-go/tree/master/examples
    // get kubernetes client
    client, _ := kubernetes.NewForConfig(kubeConfig)

    // port forward tiller
    tillerTunnel, _ := portforwarder.New("kube-system", client, kubeConfig)

    // new helm client, pointed at the local end of the tunnel
    host := fmt.Sprintf("127.0.0.1:%d", tillerTunnel.Local)
    helmClient := helm.NewClient(helm.Host(host))

    // list/print releases
    resp, _ := helmClient.ListReleases()
    for _, release := range resp.Releases {
        fmt.Println(release.GetName())
    }
}
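For completeness, here is a sketch of how the omitted kubeConfig (*rest.Config) might be built. This is standard client-go usage, not Helm-specific, and the helper name is just an assumption:

    import (
        "k8s.io/client-go/rest"
        "k8s.io/client-go/tools/clientcmd"
    )

    // buildConfig is a hypothetical helper: with an empty path it falls back to the
    // in-cluster service account config, otherwise it loads the given kubeconfig file.
    func buildConfig(kubeconfigPath string) (*rest.Config, error) {
        if kubeconfigPath == "" {
            return rest.InClusterConfig()
        }
        return clientcmd.BuildConfigFromFlags("", kubeconfigPath)
    }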
I spent a long time trying to set up a Helm installation with --set values, and I found that the best places to look for the currently available functionality are the official helm documentation example and the official Go docs for the client.
This only pertains to Helm 3.
Here's an example I managed to get working by using the resources linked above.
I haven't found a more elegant way to define values than building the nested map[string]interface{} by hand, so if anyone knows a better way, please let me know.
The values should be roughly equivalent to:
helm install myrelease /mypath --set redis.sentinel.masterName=BigMaster,redis.sentinel.pass="random" ... etc
Notice the use of settings.RESTClientGetter() rather than kube.GetConfig, as in the other answers. I found kube.GetConfig to cause nasty conflicts with k8s clients.
package main

import (
    "log"
    "os"

    "helm.sh/helm/v3/pkg/action"
    "helm.sh/helm/v3/pkg/chart/loader"
    "helm.sh/helm/v3/pkg/cli"
)

func main() {
    chartPath := "/mypath"
    namespace := "default"
    releaseName := "myrelease"

    settings := cli.New()

    actionConfig := new(action.Configuration)
    // actionConfig is initialised for the target namespace; HELM_DRIVER selects
    // the release storage backend (defaults to Kubernetes secrets when unset)
    if err := actionConfig.Init(settings.RESTClientGetter(), namespace,
        os.Getenv("HELM_DRIVER"), log.Printf); err != nil {
        log.Printf("%+v", err)
        os.Exit(1)
    }

    // define values
    vals := map[string]interface{}{
        "redis": map[string]interface{}{
            "sentinel": map[string]interface{}{
                "masterName": "BigMaster",
                "pass":       "random",
                "addr":       "localhost",
                "port":       "26379",
            },
        },
    }

    // load chart from the path
    chart, err := loader.Load(chartPath)
    if err != nil {
        panic(err)
    }

    client := action.NewInstall(actionConfig)
    client.Namespace = namespace
    client.ReleaseName = releaseName
    // client.DryRun = true - very handy!

    // install the chart here
    rel, err := client.Run(chart, vals)
    if err != nil {
        panic(err)
    }
    log.Printf("Installed Chart from path: %s in namespace: %s\n", rel.Name, rel.Namespace)
    // this will confirm the values set during installation
    log.Println(rel.Config)
}
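If building the nested map by hand feels clumsy, Helm's own strvals package (helm.sh/helm/v3/pkg/strvals) can parse a --set-style string into the same structure. A minimal sketch, assuming that package is available in your module; it would replace the vals literal in the example above:

    // Requires: import "helm.sh/helm/v3/pkg/strvals"
    // Parse a --set style string into the nested map expected by client.Run.
    vals, err := strvals.Parse("redis.sentinel.masterName=BigMaster,redis.sentinel.pass=random")
    if err != nil {
        panic(err)
    }
    // vals is now a map[string]interface{} equivalent to the hand-written literal above.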
I was looking for the same answer; since I know the solution now, I am sharing it here.
What you are looking for is to write a wrapper around the helm library.
First you need a client which speaks to the Tiller of your cluster. For that you need to create a tunnel to Tiller from your localhost. Use this (it's the same link kiran shared).
Set up the Helm environment variables, look here.
Use this next. It will return a helm client. (You might need to write a wrapper around it to work with your setup of clusters.)
After you get the *helm.Client handle, you can use helm's client API given here. You just have to use the one you need with the appropriate values.
You might need some utility functions defined here, like loading a chart as a folder/archive/file.
If you want to do something more, you pretty much locate the method in the docs and call it using the client.
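To make the wrapper idea concrete, here is a rough sketch of installing a chart with the Helm 2 client. It assumes the tunnel/host setup from the listing example above, and the exact option helpers may differ between Helm 2 minor versions:

    // Sketch only: assumes `host` points at the local end of the Tiller tunnel,
    // as in the ListReleases example above. Error handling elided.
    helmClient := helm.NewClient(helm.Host(host))

    // Install a packaged chart from disk into the given namespace.
    res, err := helmClient.InstallRelease(
        "/tmp/my-chart-0.1.0.tgz",      // chart path (archive or directory)
        "default",                      // namespace
        helm.ReleaseName("my-release"), // options are functional InstallOption values
        helm.ValueOverrides([]byte("replicaCount: 2\n")),
    )
    if err != nil {
        panic(err)
    }
    fmt.Println("Installed release:", res.GetRelease().GetName())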
I know, for example, that you can get the lastUpdateTime of a Deployment with kubectl:
kubectl get deploy <deployment-name> -o jsonpath={.status.conditions[1].lastUpdateTime}
Or via client-go:
func deploymentCheck(namespace string, clientset *kubernetes.Clientset) bool {
    // get the deployments in the namespace
    deployments, err := clientset.AppsV1().Deployments(namespace).List(context.TODO(), metav1.ListOptions{})
    if errors.IsNotFound(err) {
        log.Fatal("\nNo deployments in the namespace", err)
    } else if err != nil {
        log.Fatal("\nFailed to fetch deployments in the namespace", err)
    }

    var dptNames []string
    for _, dpt := range deployments.Items {
        dptNames = append(dptNames, dpt.Name)
    }

    // check the last update time of the deployments
    for _, dpt := range deployments.Items {
        lastUpdateTime := dpt.Status.Conditions[1].LastUpdateTime
        dptAge := time.Since(lastUpdateTime.Time)
        fmt.Printf("\nDeployment %v age: %v", dpt.Name, dptAge)
    }

    return true
}
The equivalent of lastUpdateTime := dpt.Status.Conditions[1].LastUpdateTime for a StatefulSet doesn't seem to exist.
So, how can I get the lastUpdateTime of a StatefulSet?
I noticed that the only things that change after someone edits a given resource are the resource's lastAppliedConfiguration, Generation and ObservedGeneration. So, I stored them in lists:
for _, deployment := range deployments.Items {
    deploymentNames = append(deploymentNames, deployment.Name)
    lastAppliedConfig := deployment.GetAnnotations()["kubectl.kubernetes.io/last-applied-configuration"]
    lastAppliedConfigs = append(lastAppliedConfigs, lastAppliedConfig)
    generations = append(generations, deployment.Generation)
    observedGenerations = append(observedGenerations, deployment.Status.ObservedGeneration)
}
Here's the full function:
func DeploymentCheck(namespace string, clientset *kubernetes.Clientset) ([]string, []string, []int64, []int64) {
    var deploymentNames []string
    var lastAppliedConfigs []string
    var generations []int64
    var observedGenerations []int64

    deployments, err := clientset.AppsV1().Deployments(namespace).List(context.TODO(), metav1.ListOptions{})
    if errors.IsNotFound(err) {
        log.Print("No deployments in the namespace", err)
    } else if err != nil {
        log.Print("Failed to fetch deployments in the namespace", err)
    }

    for _, deployment := range deployments.Items {
        deploymentNames = append(deploymentNames, deployment.Name)
        lastAppliedConfig := deployment.GetAnnotations()["kubectl.kubernetes.io/last-applied-configuration"]
        lastAppliedConfigs = append(lastAppliedConfigs, lastAppliedConfig)
        generations = append(generations, deployment.Generation)
        observedGenerations = append(observedGenerations, deployment.Status.ObservedGeneration)
    }

    return deploymentNames, lastAppliedConfigs, generations, observedGenerations
}
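The same pattern carries over to StatefulSets, which is where the sts* fields used below come from. A sketch of the StatefulSet version (same clientset; names are assumptions modelled on the Deployment function above):

    func StatefulSetCheck(namespace string, clientset *kubernetes.Clientset) ([]string, []string, []int64, []int64) {
        var stsNames []string
        var stsLastAppliedConfigs []string
        var stsGenerations []int64
        var stsObservedGenerations []int64

        statefulSets, err := clientset.AppsV1().StatefulSets(namespace).List(context.TODO(), metav1.ListOptions{})
        if errors.IsNotFound(err) {
            log.Print("No statefulsets in the namespace", err)
        } else if err != nil {
            log.Print("Failed to fetch statefulsets in the namespace", err)
        }

        for _, sts := range statefulSets.Items {
            stsNames = append(stsNames, sts.Name)
            lastAppliedConfig := sts.GetAnnotations()["kubectl.kubernetes.io/last-applied-configuration"]
            stsLastAppliedConfigs = append(stsLastAppliedConfigs, lastAppliedConfig)
            stsGenerations = append(stsGenerations, sts.Generation)
            stsObservedGenerations = append(stsObservedGenerations, sts.Status.ObservedGeneration)
        }

        return stsNames, stsLastAppliedConfigs, stsGenerations, stsObservedGenerations
    }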
I use all this information to instantiate a struct called Namespace, which contains all major resources a k8s namespace can have.
Then, after a given time, I check the same namespace again and see whether its resources have changed:
if !reflect.DeepEqual(namespace.stsLastAppliedConfig, namespaceCopy.stsLastAppliedConfig) {
    ...
} else if !reflect.DeepEqual(namespace.stsGeneration, namespaceCopy.stsGeneration) {
    ...
} else if !reflect.DeepEqual(namespace.stsObservedGeneration, namespaceCopy.stsObservedGeneration) {
    ...
}
So, the only workaround I found was to compare the resources' configurations, including the StatefulSets', after a given time. Apparently, for some resources you cannot get any information about their lastUpdateTime.
I also found out that lastUpdateTime is actually not reliable, as it treats minor cluster changes as changes to the resource. For example, if a cluster rotates its nodes and kills all pods, the lastUpdateTime of a Deployment will be updated. That's not what I wanted. I wanted to detect user changes to resources, like when someone applies an edited yaml file or runs kubectl edit.
#hypperster, I hope it helps.
I've recently shifted from Python to Go. I had been using Python to work with GCP.
I used to pass in the scopes and specify which discovery client I wanted to create, like this:
def get_client(scopes, api, version="v1"):
    service_account_json = os.environ.get("SERVICE_ACCOUNT_KEY_JSON", None)
    if service_account_json is None:
        sys.exit("Exiting !!! No SSH_KEY_SERVICE_ACCOUNT env var found.")
    credentials = service_account.Credentials.from_service_account_info(
        json.loads(b64decode(service_account_json)), scopes=scopes
    )
    return discovery.build(api, version, credentials=credentials, cache_discovery=False)
And this would create my desired discovery client, whether it be the Compute Engine service or sqladmin.
However, in Go I can't seem to find an equivalent.
I found this: https://pkg.go.dev/google.golang.org/api/discovery/v1
For any client that I want to create, I would have to import its package and then construct it, like this:
https://cloud.google.com/resource-manager/reference/rest/v1/projects/list#examples
package main

import (
    "fmt"
    "log"

    "golang.org/x/net/context"
    "golang.org/x/oauth2/google"
    "google.golang.org/api/cloudresourcemanager/v1"
)

func main() {
    ctx := context.Background()

    c, err := google.DefaultClient(ctx, cloudresourcemanager.CloudPlatformScope)
    if err != nil {
        log.Fatal(err)
    }

    cloudresourcemanagerService, err := cloudresourcemanager.New(c)
    if err != nil {
        log.Fatal(err)
    }

    req := cloudresourcemanagerService.Projects.List()
    if err := req.Pages(ctx, func(page *cloudresourcemanager.ListProjectsResponse) error {
        for _, project := range page.Projects {
            // TODO: Change code below to process each `project` resource:
            fmt.Printf("%#v\n", project)
        }
        return nil
    }); err != nil {
        log.Fatal(err)
    }
}
So I have to import each client library to get the client for that service:
"google.golang.org/api/cloudresourcemanager/v1"
There is no dynamic creation of it.
Is it even possible, given that Go does strict type checking? 🤔
Thanks.
No, this is not possible with the Go Google Cloud library.
You've nailed the point about strict type checking: dynamic client creation would defeat the benefits of compile-time type checking. It would also be bad Go practice to return different objects with different signatures; Go doesn't do duck typing, and instead relies on interface contracts.
Go is boring and verbose, and it's like that by design :)
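For illustration, a hedged sketch of the idiomatic Go equivalent: each service gets its own typed constructor, and they can share the same credentials via option.WithCredentialsJSON (package paths per google.golang.org/api; the env var name is carried over from the Python snippet above):

    package main

    import (
        "context"
        "encoding/base64"
        "log"
        "os"

        compute "google.golang.org/api/compute/v1"
        "google.golang.org/api/option"
        sqladmin "google.golang.org/api/sqladmin/v1beta4"
    )

    func main() {
        ctx := context.Background()

        // Decode the base64-encoded service account key, mirroring the Python version.
        raw, err := base64.StdEncoding.DecodeString(os.Getenv("SERVICE_ACCOUNT_KEY_JSON"))
        if err != nil {
            log.Fatal(err)
        }
        creds := option.WithCredentialsJSON(raw)

        // One typed constructor per API; there is no dynamic discovery client.
        computeSvc, err := compute.NewService(ctx, creds)
        if err != nil {
            log.Fatal(err)
        }
        sqlSvc, err := sqladmin.NewService(ctx, creds)
        if err != nil {
            log.Fatal(err)
        }

        _ = computeSvc // use e.g. computeSvc.Instances.List(project, zone)
        _ = sqlSvc     // use e.g. sqlSvc.Instances.List(project)
    }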
The Kubernetes Go client has tons of methods and I can't find how to get the current CPU & RAM usage of a specific pod (or of all pods).
Can someone tell me what methods I need to call to get the current usage for pods & nodes?
My NodeList:
nodes, err := clientset.CoreV1().Nodes().List(metav1.ListOptions{})
Kubernetes Go Client: https://github.com/kubernetes/client-go
Metrics package: https://github.com/kubernetes/kubernetes/tree/master/staging/src/k8s.io/metrics
As far as I can tell, the metrics server implements the Kubernetes metrics package in order to fetch the resource usage from pods and nodes, but I couldn't figure out where and how it does that: https://github.com/kubernetes-incubator/metrics-server
It is correct that client-go does not have support for the metrics types, but the metrics package contains a pregenerated client that can be used to fetch metrics objects and assign them right away to the appropriate structures. The only thing you need to do first is to build a config and pass it to the metrics client. A simple metrics client would look like this:
package main

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/tools/clientcmd"
    metrics "k8s.io/metrics/pkg/client/clientset/versioned"
)

func main() {
    var kubeconfig, master string // empty, assuming inClusterConfig

    config, err := clientcmd.BuildConfigFromFlags(master, kubeconfig)
    if err != nil {
        panic(err)
    }

    mc, err := metrics.NewForConfig(config)
    if err != nil {
        panic(err)
    }

    mc.MetricsV1beta1().NodeMetricses().Get("your node name", metav1.GetOptions{})
    mc.MetricsV1beta1().NodeMetricses().List(metav1.ListOptions{})
    mc.MetricsV1beta1().PodMetricses(metav1.NamespaceAll).List(metav1.ListOptions{})
    mc.MetricsV1beta1().PodMetricses(metav1.NamespaceAll).Get("your pod name", metav1.GetOptions{})
}
Each of the above metrics client methods returns the appropriate structure (you can check those here) and an error (if any), which you should process according to your requirements.
Here is an example:
package main

import (
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/tools/clientcmd"
    metrics "k8s.io/metrics/pkg/client/clientset/versioned"
)

func main() {
    var kubeconfig, master string // empty, assuming inClusterConfig

    config, err := clientcmd.BuildConfigFromFlags(master, kubeconfig)
    if err != nil {
        panic(err)
    }

    mc, err := metrics.NewForConfig(config)
    if err != nil {
        panic(err)
    }

    podMetrics, err := mc.MetricsV1beta1().PodMetricses(metav1.NamespaceAll).List(metav1.ListOptions{})
    if err != nil {
        fmt.Println("Error:", err)
        return
    }

    for _, podMetric := range podMetrics.Items {
        podContainers := podMetric.Containers
        for _, container := range podContainers {
            // Usage.Cpu() is usually fractional (millicores), so AsInt64 would fail;
            // use MilliValue for CPU and AsInt64 for memory (bytes).
            cpuQuantity := container.Usage.Cpu().MilliValue()
            memQuantity, ok := container.Usage.Memory().AsInt64()
            if !ok {
                return
            }
            msg := fmt.Sprintf("Container Name: %s \n CPU usage (m): %d \n Memory usage: %d", container.Name, cpuQuantity, memQuantity)
            fmt.Println(msg)
        }
    }
}
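Since the question also asks about nodes, the node side looks almost the same. A short sketch using the same metrics client as above (older, context-free method signatures, matching the example):

    // List node metrics and print CPU (millicores) and memory (bytes) per node.
    nodeMetrics, err := mc.MetricsV1beta1().NodeMetricses().List(metav1.ListOptions{})
    if err != nil {
        fmt.Println("Error:", err)
        return
    }
    for _, nm := range nodeMetrics.Items {
        cpu := nm.Usage.Cpu().MilliValue()
        mem := nm.Usage.Memory().Value()
        fmt.Printf("Node %s: CPU %dm, Memory %d bytes\n", nm.Name, cpu, mem)
    }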
The API you're looking for in newer versions of Kubernetes (tested on mine as of 1.10.7) is the metrics.k8s.io/v1beta1 API route.
You can see it locally if you run kubectl proxy and check http://localhost:8001/apis/metrics.k8s.io/v1beta1/pods and /nodes on your localhost.
I see where your confusion is, though. At the time of writing, metrics/v1beta1 does not appear to have a generated typed package (https://godoc.org/k8s.io/client-go/kubernetes/typed), and it doesn't appear in the kubernetes.ClientSet object.
You can hit all available endpoints directly through the rest.RESTClient object and just specify metrics.k8s.io/v1beta1 as the versioned API path, which is more work and less convenient than the nicely wrapped ClientSet, but I'm not sure how long it will take before that API shows up in that interface.
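A rough sketch of that raw approach, using the core group's REST client with an absolute path (the same path visible through kubectl proxy above). This assumes an existing *kubernetes.Clientset named clientset and uses the older client-go signature; newer versions of DoRaw take a context argument:

    // Raw GET against the metrics API; returns the JSON body as bytes.
    raw, err := clientset.CoreV1().RESTClient().
        Get().
        AbsPath("/apis/metrics.k8s.io/v1beta1/pods").
        DoRaw()
    if err != nil {
        panic(err)
    }
    // Unmarshal raw into the structs from k8s.io/metrics/pkg/apis/metrics/v1beta1,
    // or inspect it directly.
    fmt.Println(string(raw))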
I am trying to connect to an OpenShift/K8s cluster from inside a running pod via the Go API. Therefore, I am following the tutorial from here.
Currently I have a problem with creating the OpenShift build client, whose constructor takes a previously created rest.InClusterConfig() as an argument. This should work, since it is shown in the example, but I get this error:
cannot use restconfig (type *"k8s.io/client-go/rest".Config) as type *"github.com/openshift/client-go/vendor/k8s.io/client-go/rest".Config in argument to "github.com/openshift/client-go/build/clientset/versioned/typed/build/v1".NewForConfig
I am a little confused, since rest.InClusterConfig() returns a *Config. This is accepted by corev1client.NewForConfig(), which expects a *rest.Config. But buildv1client.NewForConfig() also expects a *rest.Config, just not exactly the restconfig I am creating with rest.InClusterConfig().
Where is my mistake? Bonus points for: I am taking my first steps with the API, and all it should do is generate a second pod from an image with some parameters applied. Do I need the buildv1client client? This is pretty much Kubernetes core functionality.
The problem is that the k8s.io/client-go package exists both in the vendor/ folder of github.com/openshift/client-go and on your $GOPATH, so the compiler treats the two rest.Config types as distinct. Vendoring "github.com/openshift/client-go" should solve your problem.
To answer your second question: for the use case you have described, not really. If you want to create an OpenShift Build, then yes, you need that client, as this API object does not exist in Kubernetes. If you simply want to create a Pod, you don't need the build client. A simple example based on the API reference might look as follows:
package main

import (
    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
    "k8s.io/client-go/tools/clientcmd"
)

func main() {
    kubeconfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
        clientcmd.NewDefaultClientConfigLoadingRules(),
        &clientcmd.ConfigOverrides{},
    )

    namespace, _, err := kubeconfig.Namespace()
    if err != nil {
        panic(err)
    }

    restconfig, err := kubeconfig.ClientConfig()
    if err != nil {
        panic(err)
    }

    coreclient, err := corev1client.NewForConfig(restconfig)
    if err != nil {
        panic(err)
    }

    _, err = coreclient.Pods(namespace).Create(&v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Name: "example",
        },
        Spec: v1.PodSpec{
            Containers: []v1.Container{
                {
                    Name:    "ubuntu",
                    Image:   "ubuntu:trusty",
                    Command: []string{"echo"},
                    Args:    []string{"Hello World"},
                },
            },
        },
    })
    if err != nil {
        panic(err)
    }
}
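Since the question is about running inside a pod, the kubeconfig loading above can also be replaced with rest.InClusterConfig(). A minimal sketch of that variant; the namespace is read from the mounted service account files, which is an assumption about the standard in-cluster layout:

    // In-cluster variant: uses the pod's service account token and CA bundle.
    // Requires: import "k8s.io/client-go/rest" (and "io/ioutil" for the namespace file).
    restconfig, err := rest.InClusterConfig()
    if err != nil {
        panic(err)
    }

    // The pod's own namespace is mounted at this well-known path.
    nsBytes, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace")
    if err != nil {
        panic(err)
    }
    namespace := string(nsBytes)

    coreclient, err := corev1client.NewForConfig(restconfig)
    if err != nil {
        panic(err)
    }
    // create the Pod exactly as in the example above, e.g. coreclient.Pods(namespace).Create(...)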
I would like to roll back a deployment to a certain revision (from the rollout history) using the client-go library for k8s. But so far I haven't found a solution. I could only fetch the resource version, but not the 'deployment revision' that I get using kubectl:
kubectl rollout history deployment/nginx_dep
Here is the code using the client-go API:
config, err := clientcmd.BuildConfigFromFlags("", *kubeconfig)
clientset, err := kubernetes.NewForConfig(config)
dp, err := clientset.ExtensionsV1beta1Client.Deployments("default").Get("nginx-deployment", metav1.GetOptions{})
Using the client-go API:
How do I get the existing revision for a given deployment? I want to roll back the deployment to that revision. Can anyone tell me how I should do that?
Here is the list of dependencies in my project:
[[constraint]]
name = "k8s.io/client-go"
version = "3.0.0"
[[override]]
name = "k8s.io/apimachinery"
branch = "release-1.6"
Thank you in advance
Assuming you already had a look at the update example?
In any case, the dp variable here contains all you need:
dp, err := clientset.ExtensionsV1beta1Client.Deployments("default").Get("nginx-deployment", metav1.GetOptions{})
So dp is of type v1beta1.Deployment, which contains a metav1.ObjectMeta, which has the ResourceVersion.
clientset, err := kubernetes.NewForConfig(config)
deploymentsClient := clientset.AppsV1().Deployments("yournamespace")
result, err := deploymentsClient.Get("yourdeployment", metav1.GetOptions{})
version := result.GetObjectMeta().GetAnnotations()["deployment.kubernetes.io/revision"]
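Building on that, here is a rough sketch of how the actual rollback could be done with the same older client-go signatures: find the ReplicaSet owned by the Deployment whose deployment.kubernetes.io/revision annotation matches the revision you want, and copy its pod template back into the Deployment. This mirrors what kubectl rollout undo does; the target revision and error handling are illustrative only:

    targetRevision := "2" // the revision you want to roll back to (illustrative)

    rsList, err := clientset.AppsV1().ReplicaSets("yournamespace").List(metav1.ListOptions{})
    if err != nil {
        panic(err)
    }
    for _, rs := range rsList.Items {
        if rs.Annotations["deployment.kubernetes.io/revision"] == targetRevision && metav1.IsControlledBy(&rs, result) {
            // Note: kubectl also strips the pod-template-hash label when copying the
            // template back (see deploymentutil.SetFromReplicaSetTemplate in the next answer).
            result.Spec.Template = rs.Spec.Template
            if _, err := deploymentsClient.Update(result); err != nil {
                panic(err)
            }
            break
        }
    }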
You can also try something based on the deployment controller's own rollback logic:
https://github.com/kubernetes/kubernetes/blob/17fec00b8915dbffac40b9eb481516a66092ef3e/pkg/controller/deployment/rollback.go#L30-L69
import (
    "context"
    "sort"

    appsv1 "k8s.io/api/apps/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/klog/v2"
    deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
)

func (un *Uninject) rollbackToTemplate(d *appsv1.Deployment, rs []appsv1.ReplicaSet) (bool, error) {
    // newest ReplicaSets first
    sort.Slice(rs, func(i, j int) bool {
        return rs[i].CreationTimestamp.UnixNano() > rs[j].CreationTimestamp.UnixNano()
    })

    performedRollback := false
    replicaSet := appsv1.ReplicaSet{}

    for _, r := range rs {
        for _, initContainer := range r.Spec.Template.Spec.InitContainers {
            if initContainer.Name == "istio-init" {
                performedRollback = true
                break
            } else {
                klog.V(4).Infof("Rolling back to a revision that contains the same template as current deployment %q, skipping rollback...", d.Name)
            }
        }
        if len(r.Spec.Template.Spec.InitContainers) == 0 || !performedRollback {
            replicaSet = r
            performedRollback = true
            break
        }
    }

    if performedRollback {
        klog.V(4).Infof("Rolling back deployment %q to template spec %+v", d.Name, replicaSet.Spec.Template.Spec)
        deploymentutil.SetFromReplicaSetTemplate(d, replicaSet.Spec.Template)
        deploymentutil.SetDeploymentAnnotationsTo(d, &replicaSet)
    }

    return performedRollback, un.updateDeploymentAndClearRollbackTo(d)
}

// updateDeploymentAndClearRollbackTo sets .spec.rollbackTo to nil and updates the input deployment.
// It is assumed that the caller will have updated the deployment template appropriately (in case
// we want to rollback).
func (un *Uninject) updateDeploymentAndClearRollbackTo(d *appsv1.Deployment) error {
    klog.Infof("Cleans up rollbackTo of deployment %q", d.Name)
    _, err := un.clientSet.AppsV1().Deployments(d.Namespace).Update(context.TODO(), d, metav1.UpdateOptions{})
    return err
}