I am trying to run a very simple operation, just listing the clusters in a project, using the Google Cloud SDK for Go:
package main
import (
"context"
"flag"
"fmt"
"log"
"os"
"google.golang.org/api/container/v1"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp" // register GCP auth provider
)
var fProjectId = flag.String("projectId", "", "specify a project id to examine")
func main() {
flag.Parse()
if *fProjectId == "" {
log.Fatal("must specific -projectId")
}
ctx := context.TODO()
svc, err := container.NewService(ctx)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
// Ask Google for a list of all kube clusters in the given project.
_, err = svc.Projects.Zones.Clusters.List(*fProjectId, "-").Context(ctx).Do()
if err != nil {
fmt.Println(err)
os.Exit(1)
}
}
The code fails as follows:
▶ go run main.go --projectId my-project-id
Get "https://container.googleapis.com/v1/projects/my-project-id/zones/-/clusters?alt=json&prettyPrint=false": oauth2: cannot fetch token: 400 Bad Request
Response: {
"error": "invalid_grant",
"error_description": "reauth related error (rapt_required)",
"error_uri": "https://support.google.com/a/answer/9368756",
"error_subtype": "rapt_required"
}
exit status 1
However, the command
gcloud container clusters list
succeeds?
What might be the issue here?
The answer in the link is not very informative.
EDIT: The problem was solved once I ran
gcloud auth application-default login
Why is this needed?
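For reference: the Go client libraries do not read the user credentials that `gcloud auth login` caches for the gcloud CLI. `container.NewService(ctx)` looks up Application Default Credentials (ADC), which is exactly what `gcloud auth application-default login` writes; the `rapt_required` error typically means the cached ADC needed to be re-authenticated. A minimal sketch of passing credentials explicitly instead of relying on ADC, assuming a downloaded service-account key file (the path and project ID below are hypothetical):

package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/api/container/v1"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()

	// container.NewService(ctx) normally resolves Application Default Credentials,
	// i.e. the file written by `gcloud auth application-default login`.
	// Passing a key file explicitly skips that lookup entirely.
	svc, err := container.NewService(ctx,
		option.WithCredentialsFile("/path/to/service-account.json")) // hypothetical path
	if err != nil {
		log.Fatal(err)
	}

	resp, err := svc.Projects.Zones.Clusters.List("my-project-id", "-").Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range resp.Clusters {
		fmt.Println(c.Name)
	}
}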
I am very new to the Golang AWS SDK v2. I had similar code work fine against other AWS APIs, but this one is giving me trouble because the types are different. Also, I have searched and none of the examples covers the CodePipeline aws-sdk-go-v2 client with the kind of JSON I have to unmarshal.
I hope some of you can help me.
Below is main.go. I have the structs in another file called un-marshal.go, which I generated by taking the same output I need from the awscli tool and running it through https://mholt.github.io/json-to-go/
package main
import (
"context"
"fmt"
"log"
"encoding/json"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/service/codepipeline"
)
func main() {
pipeline_name := "frontend"
// Load the Shared AWS Configuration (~/.aws/config)
cfg, err := config.LoadDefaultConfig(context.TODO(), config.WithRegion("eu-central-1"))
	if err != nil {
		log.Fatalf("unable to load SDK config, %v", err)
	}
client := codepipeline.NewFromConfig(cfg)
pipeJson, err := client.GetPipelineState(context.TODO(), &codepipeline.GetPipelineStateInput{
Name: &pipeline_name,
})
if err != nil {
log.Println("Error getting Pipeline")
}
var cookie PipeLineResult
json.Unmarshal(pipeJson, &cookie)
fmt.Println("The name of the pipeline is: %s",cookie.PipelineName)
}
The error I am getting is:
/main.go:39:17: cannot use pipeJson (variable of type *codepipeline.GetPipelineStateOutput) as type []byte in argument to json.Unmarshal
Here I am lost, because it is a new type and I am not sure whether I should convert it, how to convert it, or whether I should just work with the native type.
Thanks in advance.
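For what it's worth, the compiler error is saying that GetPipelineState already returns a typed struct (*codepipeline.GetPipelineStateOutput), not raw JSON, so there is nothing to unmarshal; the fields can be read directly. A minimal sketch under that assumption (field names such as PipelineName and StageStates come from the v2 SDK's GetPipelineStateOutput):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/codepipeline"
)

func main() {
	pipelineName := "frontend"

	cfg, err := config.LoadDefaultConfig(context.TODO(), config.WithRegion("eu-central-1"))
	if err != nil {
		log.Fatal(err)
	}

	client := codepipeline.NewFromConfig(cfg)

	// GetPipelineState returns a typed struct, so no JSON decoding is needed.
	out, err := client.GetPipelineState(context.TODO(), &codepipeline.GetPipelineStateInput{
		Name: &pipelineName,
	})
	if err != nil {
		log.Fatal(err)
	}

	fmt.Printf("The name of the pipeline is: %s\n", *out.PipelineName)
	for _, stage := range out.StageStates {
		fmt.Printf("  stage: %s\n", *stage.StageName)
	}
}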
I am trying to run Go Azure SDK code to get a list of resource groups in my subscription, but I am getting the following error:
2022/01/22 20:25:58 MSI not available
exit status 1
package main

import (
"context"
"fmt"
"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2020-10-01/resources"
"github.com/Azure/go-autorest/autorest/azure/auth"
"github.com/Azure/go-autorest/autorest/to"
"log"
"os"
)
func main() {
authorize, err := auth.NewAuthorizerFromEnvironment()
if err != nil {
log.Fatal(err)
}
subscriptionID := os.Getenv("AZURE_SUB_ID")
//Read resource groups
resGrpClient := resources.NewGroupsClient(subscriptionID)
resGrpClient.Authorizer = authorize
//Read resources within the resource group
resClient := resources.NewClient(subscriptionID)
resClient.Authorizer = authorize
for resGrpPage, err := resGrpClient.List(context.Background(), "", nil); resGrpPage.NotDone(); err = resGrpPage.Next() {
if err != nil {
log.Fatal(err)
}
for _, resGrp := range resGrpPage.Values() {
fmt.Println("Resource Group Name: ", to.String(resGrp.Name))
resList, _ := resClient.ListByResourceGroup(context.Background(), to.String(resGrp.Name), "", "", nil)
for _, res := range resList.Values() {
fmt.Println("\t- Resource Name: ", to.String(res.Name), " | Resource Type: ", to.String(res.Type))
}
}
}
}
I am using GoLand and trying to run the app in WSL Ubuntu.
The solution is to use auth.NewAuthorizerFromCLI(), since auth.NewAuthorizerFromEnvironment() does not use the CLI credentials; MSI stands for Managed Service Identity.
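For illustration, here is a minimal sketch of the question's code with only the authorizer swapped to the CLI-based one (it assumes you are logged in locally via `az login`; the subscription ID env var is the same as in the question):

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2020-10-01/resources"
	"github.com/Azure/go-autorest/autorest/azure/auth"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	// NewAuthorizerFromCLI reuses the token from a local `az login` session,
	// so no environment variables or managed identity are required.
	authorizer, err := auth.NewAuthorizerFromCLI()
	if err != nil {
		log.Fatal(err)
	}

	resGrpClient := resources.NewGroupsClient(os.Getenv("AZURE_SUB_ID"))
	resGrpClient.Authorizer = authorizer

	for page, err := resGrpClient.List(context.Background(), "", nil); page.NotDone(); err = page.Next() {
		if err != nil {
			log.Fatal(err)
		}
		for _, rg := range page.Values() {
			fmt.Println("Resource Group Name:", to.String(rg.Name))
		}
	}
}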
Please read this documentation: Use environment-based authentication. You have a couple of options, and each needs specific environment variables to be present.
In my case, I use client credentials, so I need these three environment variables set when I run my code (a fail-fast check is sketched after this list):
AZURE_CLIENT_ID
AZURE_CLIENT_SECRET
AZURE_TENANT_ID
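A minimal sketch of that fail-fast check before constructing the environment-based authorizer (the variable names mirror the client-credential option above; the guard itself is just illustrative):

package main

import (
	"log"
	"os"

	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	// Fail fast if the client-credential variables that
	// NewAuthorizerFromEnvironment expects are not set.
	for _, name := range []string{"AZURE_CLIENT_ID", "AZURE_CLIENT_SECRET", "AZURE_TENANT_ID"} {
		if os.Getenv(name) == "" {
			log.Fatalf("%s is not set", name)
		}
	}

	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}
	_ = authorizer // attach to your resource clients as in the question
}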
import (
dataflow "cloud.google.com/go/dataflow/apiv1beta3"
"cloud.google.com/go/functions/metadata"
"context"
"fmt"
dataflowpb "google.golang.org/genproto/googleapis/dataflow/v1beta3"
"log"
"os"
)
...
func KickDataflow(ctx context.Context, start, end string) error {
client, err := dataflow.NewTemplatesClient(ctx)
if err != nil {
return err
}
defer client.Close()
req := &dataflowpb.CreateJobFromTemplateRequest{
ProjectId: "xxx",
JobName: fmt.Sprintf("transform-orders_%s", start),
Template: &dataflowpb.CreateJobFromTemplateRequest_GcsPath{
GcsPath: gcsPath,
},
Parameters: map[string]string{
"input": fmt.Sprintf("gs://xxx/order_updates/dt=%s/orders_%s_%s.jsonl", start, start, end),
},
Environment: &dataflowpb.RuntimeEnvironment{
NumWorkers: int32(1),
MaxWorkers: int32(3),
WorkerZone: "asia-northeast1-b",
TempLocation: tempLocation,
ServiceAccountEmail: os.Getenv("SERVICE_ACCOUNT"),
},
Location: "asia-northeast1",
}
	_, err = client.CreateJobFromTemplate(ctx, req)
	if err != nil {
		return err
	}
	return nil
}
It returns the following error:
error in kicking dataflow job: rpc error: code = FailedPrecondition desc = (9abc46254fa5372d): The workflow could not be created, since it was sent to an invalid regional endpoint (asia-northeast1). Please resubmit to a valid Cloud Dataflow regional endpoint. The list of Cloud Dataflow regional endpoints is at https://cloud.google.com/dataflow/docs/concepts/regional-endpoints.
There seems to be the same issue with the Node.js client:
Google Dataflow - Invalid Regional Endpoint - Impossible set region on template from nodejs client
How can I resolve this issue using the Go client?
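Not a confirmed fix, but one avenue worth trying: the same Templates client also exposes LaunchTemplate, whose request (LaunchTemplateRequest) carries a Location field as well; whether that path avoids the regional-endpoint error is something you would need to verify. A sketch of the function above rewritten that way (same imports as the original snippet; gcsPath and tempLocation are assumed to come from the omitted code):

func kickDataflowViaLaunch(ctx context.Context, start, end, gcsPath, tempLocation string) error {
	client, err := dataflow.NewTemplatesClient(ctx)
	if err != nil {
		return err
	}
	defer client.Close()

	req := &dataflowpb.LaunchTemplateRequest{
		ProjectId: "xxx",
		Template:  &dataflowpb.LaunchTemplateRequest_GcsPath{GcsPath: gcsPath},
		LaunchParameters: &dataflowpb.LaunchTemplateParameters{
			JobName: fmt.Sprintf("transform-orders_%s", start),
			Parameters: map[string]string{
				"input": fmt.Sprintf("gs://xxx/order_updates/dt=%s/orders_%s_%s.jsonl", start, start, end),
			},
			Environment: &dataflowpb.RuntimeEnvironment{
				NumWorkers:          1,
				MaxWorkers:          3,
				WorkerZone:          "asia-northeast1-b",
				TempLocation:        tempLocation,
				ServiceAccountEmail: os.Getenv("SERVICE_ACCOUNT"),
			},
		},
		// Location is what should route the request to the regional endpoint.
		Location: "asia-northeast1",
	}

	_, err = client.LaunchTemplate(ctx, req)
	return err
}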
I've recently shifted from Python to Go. I had been using Python to work with GCP.
I used to pass in the scopes and specify which discovery client I wanted to create, like this:
def get_client(scopes, api, version="v1"):
service_account_json = os.environ.get("SERVICE_ACCOUNT_KEY_JSON", None)
if service_account_json is None:
sys.exit("Exiting !!! No SSH_KEY_SERVICE_ACCOUNT env var found.")
credentials = service_account.Credentials.from_service_account_info(
json.loads(b64decode(service_account_json)), scopes=scopes
)
return discovery.build(api, version, credentials=credentials, cache_discovery=False)
And this would create my desired discovery client, whether it be the Compute Engine service or sqladmin.
However, in Go I can't seem to find an equivalent.
I found this : https://pkg.go.dev/google.golang.org/api/discovery/v1
For any client that I want to create, I would have to import its package and then construct it, like this:
https://cloud.google.com/resource-manager/reference/rest/v1/projects/list#examples
package main
import (
"fmt"
"log"
"golang.org/x/net/context"
"golang.org/x/oauth2/google"
"google.golang.org/api/cloudresourcemanager/v1"
)
func main() {
ctx := context.Background()
c, err := google.DefaultClient(ctx, cloudresourcemanager.CloudPlatformScope)
if err != nil {
log.Fatal(err)
}
cloudresourcemanagerService, err := cloudresourcemanager.New(c)
if err != nil {
log.Fatal(err)
}
req := cloudresourcemanagerService.Projects.List()
if err := req.Pages(ctx, func(page *cloudresourcemanager.ListProjectsResponse) error {
for _, project := range page.Projects {
// TODO: Change code below to process each `project` resource:
fmt.Printf("%#v\n", project)
}
return nil
}); err != nil {
log.Fatal(err)
}
}
So I have to import each client library to get the corresponding client:
"google.golang.org/api/cloudresourcemanager/v1"
There's no dynamic creation.
Is it even possible, given that Go does strict type checking? 🤔
Thanks.
No, this is not possible with the Go Google Cloud client libraries.
You've nailed the point about strict type checking: dynamic client creation would defeat the benefits of compile-time type checking. It would also be bad Go practice to return different objects with different signatures; Go doesn't do duck typing and instead relies on interface contracts.
Golang is boring and verbose, and it's like that by design :)
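While there is no discovery-style construction, the closest Go equivalent to the Python snippet is to pick the typed package you need and hand it explicit service-account credentials via the option package. A minimal sketch (the env var name and base64 decoding mirror the Python version; compute is just an example service):

package main

import (
	"context"
	"encoding/base64"
	"log"
	"os"

	"google.golang.org/api/compute/v1"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()

	encoded := os.Getenv("SERVICE_ACCOUNT_KEY_JSON")
	if encoded == "" {
		log.Fatal("no SERVICE_ACCOUNT_KEY_JSON env var found")
	}
	keyJSON, err := base64.StdEncoding.DecodeString(encoded)
	if err != nil {
		log.Fatal(err)
	}

	// Each API has its own typed package; credentials and scopes are passed
	// as options rather than to a generic discovery builder.
	svc, err := compute.NewService(ctx,
		option.WithCredentialsJSON(keyJSON),
		option.WithScopes(compute.CloudPlatformScope),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = svc // use svc.Instances, svc.Zones, etc.
}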
I started using AWS SAM, and for now I only have some unit tests, but I want to try to run integration tests in a pre-traffic hook function.
Unfortunately, there seems to be no code example for Go; all I could find was for JavaScript.
From this example I pieced together that I have to use the CodeDeploy SDK and call PutLifecycleEventHookExecutionStatus, but the specifics remain unclear. The AWS code example repo for Go has no examples for CodeDeploy either.
More information about the topic is available here: https://github.com/awslabs/serverless-application-model/blob/master/docs/safe_lambda_deployments.rst#pretraffic-posttraffic-hooks.
I want to start out by testing a lambda function that simply queries DynamoDB.
Something like this works:
package main

import (
	"context"

	"github.com/aws/aws-lambda-go/lambda"
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/codedeploy"
)

type CodeDeployEvent struct {
	DeploymentId                  string `json:"deploymentId"`
	LifecycleEventHookExecutionId string `json:"lifecycleEventHookExecutionId"`
}

func HandleRequest(ctx context.Context, event CodeDeployEvent) (string, error) {
	// add some tests here and change the status flag as needed . . .
	client := codedeploy.New(session.Must(session.NewSession()))
	params := &codedeploy.PutLifecycleEventHookExecutionStatusInput{
		DeploymentId:                  &event.DeploymentId,
		LifecycleEventHookExecutionId: &event.LifecycleEventHookExecutionId,
		Status:                        aws.String(codedeploy.LifecycleEventStatusSucceeded),
	}
	req, _ := client.PutLifecycleEventHookExecutionStatusRequest(params)
	return "", req.Send()
}

func main() {
	lambda.Start(HandleRequest)
}
I got around to implementing this and want to share my complete solution.
After figuring out how to use it, I decided against using it, because there are a couple of drawbacks:
there is no way to expose the new canary version to a dedicated portion of the user base, which means users will sometimes hit the new version and sometimes the old one
invoking functions that publish to SNS will trigger all downstream actions, which might hit either the new or the old version of the downstream services; that would cause a lot of problems in case of breaking APIs
IAM changes affect both versions immediately, possibly breaking the old version.
Instead, I deploy everything to a pre-prod account, run my integration and e2e tests, and if they succeed I deploy to prod.
The CDK code to create the canary deployment:
const versionAlias = new lambda.Alias(this, 'Alias', {
aliasName: "alias",
version: this.lambda.currentVersion,
})
const preHook = new lambda.Function(this, 'LambdaPreHook', {
description: "pre hook",
code: lambda.Code.fromAsset('dist/upload/convert-pre-hook'),
handler: 'main',
runtime: lambda.Runtime.GO_1_X,
memorySize: 128,
timeout: cdk.Duration.minutes(1),
environment: {
FUNCTION_NAME: this.lambda.currentVersion.functionName,
},
reservedConcurrentExecutions: 5,
logRetention: RetentionDays.ONE_WEEK,
})
// this.lambda.grantInvoke(preHook) // this doesn't work, I need to grant invoke to all functions :s
preHook.addToRolePolicy(new iam.PolicyStatement({
actions: [
"lambda:InvokeFunction",
],
resources: ["*"],
effect: iam.Effect.ALLOW,
}))
const application = new codedeploy.LambdaApplication(this, 'CodeDeployApplication')
new codedeploy.LambdaDeploymentGroup(this, 'CanaryDeployment', {
application: application,
alias: versionAlias,
deploymentConfig: codedeploy.LambdaDeploymentConfig.ALL_AT_ONCE,
preHook: preHook,
autoRollback: {
failedDeployment: true,
stoppedDeployment: true,
deploymentInAlarm: false,
},
ignorePollAlarmsFailure: false,
// alarms:
// autoRollback: codedeploy.A
// postHook:
})
My Go code for the pre-hook function. PutLifecycleEventHookExecutionStatus tells CodeDeploy whether the pre hook succeeded or not. Unfortunately, if you fail the deployment, the message you get in the cdk deploy output is useless, so you need to check the pre/post hook logs.
In order to actually run the integration test I simply invoke the lambda and check if an error occurred.
package main
import (
"encoding/base64"
"fmt"
"log"
"os"
"github.com/aws/aws-lambda-go/lambda"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/codedeploy"
lambdaService "github.com/aws/aws-sdk-go/service/lambda"
)
var svc *codedeploy.CodeDeploy
var lambdaSvc *lambdaService.Lambda
type codeDeployEvent struct {
DeploymentId string `json:"deploymentId"`
LifecycleEventHookExecutionId string `json:"lifecycleEventHookExecutionId"`
}
func handler(e codeDeployEvent) error {
params := &codedeploy.PutLifecycleEventHookExecutionStatusInput{
DeploymentId: &e.DeploymentId,
LifecycleEventHookExecutionId: &e.LifecycleEventHookExecutionId,
}
err := handle()
if err != nil {
log.Println(err)
params.Status = aws.String(codedeploy.LifecycleEventStatusFailed)
} else {
params.Status = aws.String(codedeploy.LifecycleEventStatusSucceeded)
}
_, err = svc.PutLifecycleEventHookExecutionStatus(params)
if err != nil {
return fmt.Errorf("failed putting the lifecycle event hook execution status. the status was %s", *params.Status)
}
return nil
}
func handle() error {
functionName := os.Getenv("FUNCTION_NAME")
if functionName == "" {
return fmt.Errorf("FUNCTION_NAME not set")
}
log.Printf("function name: %s", functionName)
// invoke lambda via sdk
input := &lambdaService.InvokeInput{
FunctionName: &functionName,
Payload: nil,
LogType: aws.String(lambdaService.LogTypeTail), // returns the log in the response
InvocationType: aws.String(lambdaService.InvocationTypeRequestResponse), // synchronous - default
}
err := input.Validate()
if err != nil {
return fmt.Errorf("validating the input failed: %v", err)
}
resp, err := lambdaSvc.Invoke(input)
if err != nil {
return fmt.Errorf("failed to invoke lambda: %v", err)
}
decodeString, err := base64.StdEncoding.DecodeString(*resp.LogResult)
if err != nil {
return fmt.Errorf("failed to decode the log: %v", err)
}
log.Printf("log result: %s", decodeString)
if resp.FunctionError != nil {
return fmt.Errorf("lambda was invoked but returned error: %s", *resp.FunctionError)
}
return nil
}
func main() {
sess, err := session.NewSession()
	if err != nil {
		log.Fatalf("failed to create AWS session: %v", err)
	}
svc = codedeploy.New(sess)
lambdaSvc = lambdaService.New(sess)
lambda.Start(handler)
}