I have the following code, which works; I was able to install a Helm chart in the target cluster.
When you install a chart, it takes time until the application (within the chart) is available.
How can I check whether the application that was installed via the Helm chart is up and running?
Is there a way to do it with the Helm client (we are using Helm 3.5.2)?
tmpfile, err := ioutil.TempFile(kp, kcp)
if err != nil {
log.Error(err, "error")
}
defer os.Remove(tmpfile.Name())
if _, err := tmpfile.Write(cfg); err != nil {
return err
}
if err := tmpfile.Close(); err != nil {
return err
}
kcfgFilePath := tmpfile.Name()
settings := cli.New()
ac := new(action.Configuration)
clientGetter := genericclioptions.NewConfigFlags(false)
clientGetter.KubeConfig = &kcfgFilePath
for _, chartInstallation := range charts {
chart, err := loader.Load(chartInstallation.Path)
if err != nil {
return err
}
releaseName := releaseName + "-" + chartInstallation.Name
if err := ac.Init(clientGetter, settings.Namespace(), os.Getenv("HELM_DRIVER"), func(format string, v ...interface{}) {
}); err != nil {
return err
}
releasePresent := true
statusAction := action.NewStatus(ac)
status, err := statusAction.Run(releaseName)
if err != nil {
if strings.Contains(err.Error(), driver.ErrReleaseNotFound.Error()) {
releasePresent = false
} else {
return err
}
}
if !releasePresent {
// install chart
installAction := action.NewInstall(ac)
installAction.CreateNamespace = true
installAction.Namespace = chartInstallation.Namespace
installAction.ReleaseName = releaseName
_, err := installAction.Run(chart, nil)
if err != nil {
return err
}
log.Info("chart installed: ", "releaseName", releaseName)
}
if status != nil {
if releasePresent && status.Info.Status.String() == release.StatusFailed.String() {
upgradeAction := action.NewUpgrade(ac)
upgradeAction.Wait = true
upgradeAction.ReuseValues = false
upgradeAction.Recreate = false
_, err := upgradeAction.Run(releaseName, chart, nil)
if err != nil {
return err
}
}
}
You need to helm install (or upgrade) using the --wait flag:
--wait: Waits until all Pods are in a ready state, PVCs are bound, Deployments have minimum (Desired minus maxUnavailable) Pods in ready state, and Services have an IP address (and Ingress if a LoadBalancer) before marking the release as successful. It will wait for as long as the --timeout [...]
Make sure that your pods have liveness and readiness checks. With --wait, the release is only marked successful once these checks pass.
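Since you are driving Helm from the Go SDK rather than the CLI, the same behaviour is available on the action structs: action.Install (and action.Upgrade) expose Wait and Timeout fields. A minimal sketch reusing the variables from your snippet (the time import is assumed):
installAction := action.NewInstall(ac)
installAction.CreateNamespace = true
installAction.Namespace = chartInstallation.Namespace
installAction.ReleaseName = releaseName
// Equivalent of `helm install --wait --timeout 5m`: Run blocks until the
// release's resources are ready or the timeout expires.
installAction.Wait = true
installAction.Timeout = 5 * time.Minute
if _, err := installAction.Run(chart, nil); err != nil {
    return err
}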
Versions
Sarama - v1.32.0
Kafka - 5.4.6-2.12
Go - v1.16.2
sarama.Logger = log.New(os.Stderr, "[Sarama] ", log.LstdFlags)
brokers := []string{
"kafka1:9092",
"kafka2:9092",
}
cfg := sarama.NewConfig()
cfg.Version = sarama.V2_4_0_0
admin, err := sarama.NewClusterAdmin(brokers, cfg)
if err != nil {
log.Fatal(err)
}
brok, _, err := admin.DescribeCluster()
if err != nil {
log.Fatal(err)
}
for _, b := range brok {
chck, err := b.Connected()
if err != nil {
log.Fatal(err)
}
log.Println(chck)
}
I am trying to check whether the brokers are connected, but the Connected() method returns false. Is there an explanation as to why it returns false? I need to write a health check, so my initial plan was to check whether the brokers were connected; are there any other checks I could do instead?
saramaConfig := sarama.NewConfig()
saramaConfig.Version = sarama.V2_0_0_0
client, err := sarama.NewClient(brokers, saramaConfig)
if err != nil {
log.Fatal("NewKafkaAdmin", fmt.Sprintf(`cannot get controller - %+v`, err))
}
b, _ := client.Controller()
fmt.Println(b.Connected())
This returns true. Why is there such a mismatch?
This cannot be done using the DescribeCluster method. A detailed explanation can be found in the following issue:
https://github.com/Shopify/sarama/issues/2222
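If all you need is a health check, one workaround is to not rely on the brokers returned by DescribeCluster at all: create a regular client and check that cluster metadata can be fetched. A minimal sketch, assuming the same brokers slice and roughly the versions listed above (this is one possible check, not the library's recommended one):
cfg := sarama.NewConfig()
cfg.Version = sarama.V2_4_0_0

client, err := sarama.NewClient(brokers, cfg)
if err != nil {
    log.Fatal(err)
}
defer client.Close()

// RefreshMetadata only succeeds if at least one broker is reachable,
// which is usually enough for a liveness/readiness probe.
if err := client.RefreshMetadata(); err != nil {
    log.Fatalf("kafka unhealthy: %v", err)
}
log.Println("kafka reachable; brokers known to the client:", len(client.Brokers()))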
I want to execute some Redis commands atomically (HDEL, SADD, HSET, etc.). I see the Watch feature in go-redis for implementing transactions; however, since I am not going to modify the value of a key (i.e. use SET, GET, etc.), does it make sense to use Watch to execute them as a transaction, or would just wrapping the commands in a TxPipeline be good enough?
Approach 1: Using Watch
func sampleTransaction() error{
transactionFunc := func(tx *redis.Tx) error {
// Queue both commands on the transaction pipeline; they are sent as a
// single MULTI/EXEC block when the callback returns.
_, err := tx.TxPipelined(context.Background(), func(pipe redis.Pipeliner) error {
pipe.SAdd(context.Background(), "redis-set-key", "value1")
pipe.HDel(context.Background(), "redis-hash-key", "value1")
return nil
})
return err
}
retries:=10
// Retry if the key has been changed.
for i := 0; i < retries; i++ {
fmt.Println("tries", i)
err := redisClient.Watch(context.Background(), transactionFunc, "redis-set-key", "redis-hash-key")
if err == nil {
// Success.
return nil
}
if err == redis.TxFailedErr {
continue
}
return err
}
return fmt.Errorf("transaction failed after %d retries", retries)
}
Approach 2: Just wrapping in TxPipelined
func sampleTransaction() error {
_, err := redisClient.TxPipelined(context.Background(), func(pipe redis.Pipeliner) error {
pipe.SAdd(context.Background(), "redis-set-key", "value1")
pipe.HDel(context.Background(), "redis-hash-key", "value1")
return nil
})
return err
}
As far as I know, pipelines do not guarantee atomicity. If you need atomicity, use a Lua script.
https://pkg.go.dev/github.com/mediocregopher/radix.v3#NewEvalScript
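The linked example is for radix; with go-redis (which the question uses), the equivalent helper is redis.NewScript. A minimal sketch assuming go-redis v8, a context, and the key names from the question (the script body is just an illustration of the two commands above):
// The script runs atomically on the server: either both commands are
// applied or neither is, and no other client command interleaves.
var moveMember = redis.NewScript(`
redis.call("SADD", KEYS[1], ARGV[1])
redis.call("HDEL", KEYS[2], ARGV[1])
return 1
`)

func sampleAtomicUpdate(ctx context.Context, rdb *redis.Client) error {
    return moveMember.Run(ctx, rdb,
        []string{"redis-set-key", "redis-hash-key"}, // KEYS
        "value1",                                    // ARGV
    ).Err()
}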
I'm trying to get and return all the nodes of NodeClass Variable of my OPC UA Simulation Server, starting at the root node and going down all folder and object nodes that have children. I tried to use the browse example from the gopcua repo, but whenever the program gets to attrs, err := n.Attributes(...) for the second time, it returns an EOF error.
I tried to recreate a minimal example:
package main
import (
"context"
"fmt"
"log"
"github.com/gopcua/opcua"
"github.com/gopcua/opcua/id"
"github.com/gopcua/opcua/ua"
)
const (
endpoint string = "opc.tcp://<ServerAddress>"
rootNodeId string = "i=85"
)
func browse(c *opcua.Client, n *opcua.Node) error {
_, err := n.Attributes(ua.AttributeIDNodeClass, ua.AttributeIDDataType)
if err != nil {
return err
}
browseChildren := func(refType uint32) error {
refs, err := n.ReferencedNodes(refType, ua.BrowseDirectionForward, ua.NodeClassAll, true)
if err != nil {
return fmt.Errorf("references: %d: %s", refType, err)
}
fmt.Printf("found %d child refs\n", len(refs))
for _, rn := range refs {
err := browse(c, rn)
if err != nil {
return fmt.Errorf("browse children: %s", err)
}
fmt.Printf("Found a Node: %s\n", rn.ID.String())
}
return nil
}
if err := browseChildren(id.HasChild); err != nil {
return err
}
if err := browseChildren(id.Organizes); err != nil {
return err
}
return nil
}
func main() {
ctx := context.Background()
c := opcua.NewClient(endpoint)
if err := c.Connect(ctx); err != nil {
log.Fatalf("Could not connect: %v", err)
}
defer c.Close()
id, _ := ua.ParseNodeID(rootNodeId)
err := browse(c, c.Node(id))
if err != nil {
log.Fatal(err)
}
}
This is how the node tree looks in the Prosys OPC UA Simulation Server, and this is the output I get from the program:
found 0 child refs
found 5 child refs
2022/01/21 14:26:06 browse children: EOF
exit status 1
tl;dr
browseChildren := func(refType uint32) error {
refs, err := n.ReferencedNodes(refType, ua.BrowseDirectionForward, ua.NodeClassAll, true)
if err != nil {
return fmt.Errorf("references: %d: %s", refType, err)
}
fmt.Printf("found %d child refs\n", len(refs))
for _, rn := range refs {
refNodeID := ua.MustParseNodeID(rn.ID.String())
refNode := c.Node(refNodeID) // parse the referenced nodes before usage
err := browse(c, refNode)
if err != nil {
return fmt.Errorf("browse children: %s", err)
}
fmt.Printf("Found a Node: %s\n", refNode.ID.String())
}
return nil
}
Reason
The referenced nodes that are returned by n.ReferencedNodes have an invalid NodeIDType (64). The only supported NodeIDTypes are 0-5. Reparsing them gives them a proper NodeIDType. (Related GitHub issue: https://github.com/gopcua/opcua/issues/550)
I use the following code, which works and installs Helm charts.
I have a list of charts, and the code installs each chart (via a loop) and waits (upgradeAction.Wait = true, see below) until the chart is up and running (using Helm's wait flag) before installing the next one. The problem is that waiting for each chart to be up and running before proceeding to the next one takes a lot of time. Is there a way to install them all in parallel and only verify at the end (after all the chart installations) that everything works, like how wait works but for a list of charts?
Here is the code:
tmpfile, err := ioutil.TempFile(kp, kcp)
if err != nil {
log.Error(err, "error")
}
defer os.Remove(tmpfile.Name())
if _, err := tmpfile.Write(cfg); err != nil {
return err
}
if err := tmpfile.Close(); err != nil {
return err
}
kcfgFilePath := tmpfile.Name()
settings := cli.New()
ac := new(action.Configuration)
clientGetter := genericclioptions.NewConfigFlags(false)
clientGetter.KubeConfig = &kcfgFilePath
for _, chartInstallation := range charts {
chart, err := loader.Load(chartInstallation.Path)
if err != nil {
return err
}
releaseName := releaseName + "-" + chartInstallation.Name
if err := ac.Init(clientGetter, settings.Namespace(), os.Getenv("HELM_DRIVER"), func(format string, v ...interface{}) {
}); err != nil {
return err
}
releasePresent := true
statusAction := action.NewStatus(ac)
status, err := statusAction.Run(releaseName)
if err != nil {
if strings.Contains(err.Error(), driver.ErrReleaseNotFound.Error()) {
releasePresent = false
} else {
return err
}
}
if !releasePresent {
// install chart
installAction := action.NewInstall(ac)
installAction.CreateNamespace = true
installAction.Namespace = chartInstallation.Namespace
installAction.ReleaseName = releaseName
_, err := installAction.Run(chart, nil)
if err != nil {
return err
}
log.Info("chart installed: ", "releaseName", releaseName)
}
if status != nil {
if releasePresent && status.Info.Status.String() == release.StatusFailed.String() {
upgradeAction := action.NewUpgrade(ac)
// HERE IT WAITS FOR THE CHART, TO VERIFY THAT EVERYTHING IS UP
upgradeAction.Wait = true
upgradeAction.ReuseValues = false
upgradeAction.Recreate = false
_, err := upgradeAction.Run(releaseName, chart, nil)
if err != nil {
return err
}
}
}
}
If I change it to upgradeAction.Wait = false, it starts installing all the charts without waiting for each one's health checks; however, I am not sure how I can verify them at the end of all the chart installations.
You could start a goroutine for each chart you're installing (wrapping the chart-install code inside goroutines) and then use sync.WaitGroup to wait for all goroutines to finish. Something like this:
package main
import (
"fmt"
"os"
"strings"
"sync"

"helm.sh/helm/v3/pkg/action"
"helm.sh/helm/v3/pkg/chart/loader"
"helm.sh/helm/v3/pkg/cli"
"helm.sh/helm/v3/pkg/release"
"helm.sh/helm/v3/pkg/storage/driver"
"k8s.io/cli-runtime/pkg/genericclioptions"
)
func main() {
kcfgFilePath := tmpfile.Name()
settings := cli.New()
ac := new(action.Configuration)
clientGetter := genericclioptions.NewConfigFlags(false)
clientGetter.KubeConfig = &kcfgFilePath
var wg sync.WaitGroup
for _, chartInstallation := range charts {
wg.Add(1)
go installChart(&wg, chartInstallation)
}
fmt.Println("Installing...")
wg.Wait()
fmt.Println("Installed!")
}
// chartInstallation is assumed to be the same struct used in the question
// (with Path, Name and Namespace fields); ac, settings, clientGetter and
// releaseName are assumed to be shared with main.
func installChart(wg *sync.WaitGroup, chartInstallation ChartInstallation) error {
defer wg.Done()
chart, err := loader.Load(chartInstallation.Path)
if err != nil {
return err
}
releaseName := releaseName + "-" + chartInstallation.Name
if err := ac.Init(clientGetter, settings.Namespace(), os.Getenv("HELM_DRIVER"), func(format string, v ...interface{}) {
}); err != nil {
return err
}
releasePresent := true
statusAction := action.NewStatus(ac)
status, err := statusAction.Run(releaseName)
if err != nil {
if strings.Contains(err.Error(), driver.ErrReleaseNotFound.Error()) {
releasePresent = false
} else {
return err
}
}
if !releasePresent {
// install chart
installAction := action.NewInstall(ac)
installAction.CreateNamespace = true
installAction.Namespace = chartInstallation.Namespace
installAction.ReleaseName = releaseName
_, err := installAction.Run(chart, nil)
if err != nil {
return err
}
log.Info("chart installed: ", "releaseName", releaseName)
}
if status != nil {
if releasePresent && status.Info.Status.String() == release.StatusFailed.String() {
upgradeAction := action.NewUpgrade(ac)
// HERE IT WAITS FOR THE CHART, TO VERIFY THAT EVERYTHING IS UP
upgradeAction.Wait = true
upgradeAction.ReuseValues = false
upgradeAction.Recreate = false
_, err := upgradeAction.Run(releaseName, chart, nil)
if err != nil {
return err
}
}
}
}
Here's a good resource for that: https://goinbigdata.com/golang-wait-for-all-goroutines-to-finish/
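One thing to keep in mind with a plain sync.WaitGroup is that the error returned by installChart is discarded by the go statement. If you also want the overall run to fail when any chart fails, golang.org/x/sync/errgroup gives you the same parallelism plus error collection. A minimal sketch under the same assumptions as above (shared Helm setup, a charts slice, and an installChart helper without the WaitGroup parameter), meant to run inside a function that returns an error:
var g errgroup.Group
for _, chartInstallation := range charts {
    chartInstallation := chartInstallation // capture the loop variable (needed before Go 1.22)
    g.Go(func() error {
        return installChart(chartInstallation)
    })
}
// Wait blocks until every goroutine has finished and returns the first
// non-nil error, so a single check covers all chart installations.
if err := g.Wait(); err != nil {
    return fmt.Errorf("at least one chart installation failed: %w", err)
}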
I'm writing a CLI in Go with gojenkins. I need to run a build and then retrieve the build information (because I then want to wait until it finishes), but the id returned by the BuildJob method is not the same one that GetBuild needs.
func RunPipeline(config *config.Profile) {
jobName := "test_pipeline"
jk, err := getClient(config)
params := make(map[string]string)
id, err := jk.BuildJob(jobName, params)
if err != nil {
fmt.Println(err)
}
fmt.Printf("Build number: %d\n", id)
build, err := jk.GetBuild(jobName, id)
if err != nil {
fmt.Printf("Did not found build: %s\n", err)
}
fmt.Println(build)
}
Output:
Build number: 86
Did not find build: 404
<nil>
The last build id for that pipeline should be 37, but I got 86. Can someone help me figure out how to get that id? I didn't find anything in the library documentation.
Finally, I found this issue on GitHub: the id returned by BuildJob is the id of the item in the build queue, not the build number. So this works for me:
func Pipeline(config *config.Profile, buildData BuildData) {
jobName := "test_pipeline"
jk, err := getClient(config)
if err != nil {
panic(err)
}
queueId, err := jk.BuildJob(jobName, buildData.toParams())
if err != nil {
fmt.Println(err)
}
task, err := jk.GetQueueItem(queueId)
if err != nil {
panic(err)
}
retry := 0
for retry < 60 {
if task.Raw.Executable.URL != "" {
break
}
time.Sleep(1 * time.Second)
task.Poll()
retry++
}
build, err := jk.GetBuild(jobName, task.Raw.Executable.Number)
if err != nil {
panic(err)
}
fmt.Printf("%+v\n", build.Raw)
}