kubernetes client-go convert yaml to go code - go

Is there documentation on building k8s jobs in go-client somewhere? In particular I'm trying to convert a job yaml to go code and for the life of me cannot find reference docs that say how the fields convert

k8s.io/api is the Kubernetes package that kubectl and other components use to implement the Kubernetes APIs. It contains the struct for the Job API, and you can use it to convert Job manifests into Go structs.
I think this code can help:
package main

import (
	"fmt"
	"os"

	v1 "k8s.io/api/batch/v1"
	"sigs.k8s.io/yaml"
)

func main() {
	b, err := os.ReadFile("/path/to/job.yaml")
	if err != nil {
		panic(err)
	}

	// The Kubernetes API structs only carry json tags, so use sigs.k8s.io/yaml,
	// which converts the YAML to JSON before unmarshalling. A plain YAML
	// library such as gopkg.in/yaml.v2 would leave most fields empty.
	job := &v1.Job{}
	if err := yaml.Unmarshal(b, job); err != nil {
		panic(err)
	}

	fmt.Println(job)
}
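If the goal is then to create the decoded Job in a cluster, here is a minimal sketch (my own addition, assuming an already constructed *kubernetes.Clientset and a recent client-go where Create takes a context and options, mirroring the generated code further down):

import (
	"context"

	batchv1 "k8s.io/api/batch/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// createJob submits the decoded Job to the cluster; building the clientset
// (kubeconfig or in-cluster config) is left to the caller.
func createJob(clientset *kubernetes.Clientset, job *batchv1.Job) error {
	_, err := clientset.BatchV1().Jobs("default").Create(context.TODO(), job, metav1.CreateOptions{})
	return err
}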

Converting YAML to Go can be difficult, and oftentimes documentation with examples is missing.
I wrote a tool called naml that can convert any Kubernetes YAML to raw Go. It is handy because it works against the version of Kubernetes you are running, and is compiled with the latest version of the Kubernetes code base.
If you wanted to create a Job and see valid Go for it, it would look like this. First, create a Job named beeps with the container image boops:
[nova@emma ~]$ kubectl create job beeps --image boops
job.batch/beeps created
[nova@emma ~]$
Naml will output a working program by design, but you will also get the output you are looking for.
[nova@emma naml]$ kubectl get job beeps -o yaml | naml codify
// Copyright © 2021 Kris Nóva <kris@nivenly.com>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// ███╗ ██╗ █████╗ ███╗ ███╗██╗
// ████╗ ██║██╔══██╗████╗ ████║██║
// ██╔██╗ ██║███████║██╔████╔██║██║
// ██║╚██╗██║██╔══██║██║╚██╔╝██║██║
// ██║ ╚████║██║ ██║██║ ╚═╝ ██║███████╗
// ╚═╝ ╚═══╝╚═╝ ╚═╝╚═╝ ╚═╝╚══════╝
//
package main

import (
	"context"
	"fmt"
	"os"
	"github.com/hexops/valast"
	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"github.com/kris-nova/naml"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes"
)

// Version is the current release of your application.
var Version string = "0.0.1"

func main() {
	// Load the application into the NAML registry
	// Note: naml.Register() can be used multiple times.
	naml.Register(NewApp("App", "Application autogenerated from NAML v0.3.1"))

	// Run the generic naml command line program with
	// the application loaded.
	err := naml.RunCommandLine()
	if err != nil {
		fmt.Println(err.Error())
		os.Exit(1)
	}
}

// App is a very important grown up business application.
type App struct {
	metav1.ObjectMeta
	description string
	objects     []runtime.Object
	// ----------------------------------
	// Add your configuration fields here
	// ----------------------------------
}

// NewApp will create a new instance of App.
//
// See https://github.com/naml-examples for more examples.
//
// This is where you pass in fields to your application (similar to Values.yaml)
// Example: func NewApp(name string, example string, something int) *App
func NewApp(name, description string) *App {
	return &App{
		description: description,
		ObjectMeta: metav1.ObjectMeta{
			Name:            name,
			ResourceVersion: Version,
		},
		// ----------------------------------
		// Add your configuration fields here
		// ----------------------------------
	}
}

func (a *App) Install(client *kubernetes.Clientset) error {
	var err error
	beepsJob := &batchv1.Job{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Job",
			APIVersion: "batch/batchv1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:            "beeps",
			Namespace:       "default",
			UID:             types.UID("650e4f36-3316-4506-bbe0-1e34c13742cf"),
			ResourceVersion: "3231200",
			Generation:      1,
			Labels: map[string]string{
				"controller-uid": "650e4f36-3316-4506-bbe0-1e34c13742cf",
				"job-name":       "beeps",
			},
		},
		Spec: batchv1.JobSpec{
			Parallelism:  valast.Addr(int32(1)).(*int32),
			Completions:  valast.Addr(int32(1)).(*int32),
			BackoffLimit: valast.Addr(int32(6)).(*int32),
			Selector: &metav1.LabelSelector{MatchLabels: map[string]string{
				"controller-uid": "650e4f36-3316-4506-bbe0-1e34c13742cf",
			}},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{
					"controller-uid": "650e4f36-3316-4506-bbe0-1e34c13742cf",
					"job-name":       "beeps",
				}},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{corev1.Container{
						Name:                     "beeps",
						Image:                    "boops",
						TerminationMessagePath:   "/dev/termination-log",
						TerminationMessagePolicy: corev1.TerminationMessagePolicy("File"),
						ImagePullPolicy:          corev1.PullPolicy("Always"),
					}},
					RestartPolicy:                 corev1.RestartPolicy("Never"),
					TerminationGracePeriodSeconds: valast.Addr(int64(30)).(*int64),
					DNSPolicy:                     corev1.DNSPolicy("ClusterFirst"),
					SecurityContext:               &corev1.PodSecurityContext{},
					SchedulerName:                 "default-scheduler",
				},
			},
			CompletionMode: valast.Addr(batchv1.CompletionMode("NonIndexed")).(*batchv1.CompletionMode),
			Suspend:        valast.Addr(false).(*bool),
		},
	}
	a.objects = append(a.objects, beepsJob)

	if client != nil {
		_, err = client.BatchV1().Jobs("default").Create(context.TODO(), beepsJob, metav1.CreateOptions{})
		if err != nil {
			return err
		}
	}

	return err
}

func (a *App) Uninstall(client *kubernetes.Clientset) error {
	var err error
	if client != nil {
		err = client.BatchV1().Jobs("default").Delete(context.TODO(), "beeps", metav1.DeleteOptions{})
		if err != nil {
			return err
		}
	}
	return err
}

func (a *App) Description() string {
	return a.description
}

func (a *App) Meta() *metav1.ObjectMeta {
	return &a.ObjectMeta
}

func (a *App) Objects() []runtime.Object {
	return a.objects
}

Related

List Kubernetes resources with client-go: how can I get the kind and apiVersion?

When I get Kubernetes resources from the API with client-go, I can't find the apiVersion and kind in the response; both are empty. How can I get the apiVersion and kind of the resource?
below is my code:
package main

import (
	"flag"
	"k8s.io/client-go/tools/clientcmd"
	"log"
	"k8s.io/client-go/kubernetes"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"fmt"
)

var clientset *kubernetes.Clientset

func main() {
	k8sconfig := flag.String("k8sconfig", "./k8sconfig", "kubernetes config file path")
	flag.Parse()

	config, err := clientcmd.BuildConfigFromFlags("", *k8sconfig)
	if err != nil {
		log.Println(err)
	}

	clientset, err = kubernetes.NewForConfig(config)
	if err != nil {
		log.Fatalln(err)
	} else {
		fmt.Println("connect k8s success")
	}

	pods, err := clientset.CoreV1().Pods("").List(metav1.ListOptions{})
	if err != nil {
		log.Println(err.Error())
	}

	for _, pod := range pods.Items {
		fmt.Println("apiversion: ", pod.APIVersion, "kind: ", pod.Kind)
	}
}
The output:
apiversion: kind:
apiversion: kind:
apiversion: kind:
apiversion: kind:
apiversion: kind:
apiversion: kind:
apiversion: kind:
apiversion: kind:
......
......
I think the issue is that you are retrieving the pods with the List() API, so what comes back is not a single Pod as you expect and it does not have the Kind field populated.
You need to iterate over the list of pods to access individual pods:
for _, pod := range pods.Items {
	fmt.Printf("%s %s\n", pod.GetName(), pod.GetCreationTimestamp())
}
The Kind field is part of the Pod's TypeMeta and can be accessed via pod.TypeMeta.Kind (or just pod.Kind), although the typed clients usually leave it empty.
You are not getting the APIVersion and Kind because they are dropped by the code that decodes the response.
If you take a look at the API server JSON response, it will be something like below:
{
	"kind": "PodList",
	"apiVersion": "v1",
	"metadata": {
		"resourceVersion": "2397"
	},
	"items": [
		{
			... ... ...
So the response does contain the APIVersion and Kind. But when the response is decoded into a k8s object, here
out, _, err := r.decoder.Decode(r.body, nil, obj)
you can see that the second return value, which is the *schema.GroupVersionKind, is ignored:
func (c *Something) Decode([]byte, *schema.GroupVersionKind, runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
}
N.B.: When you are making an API call using client-go (unless it is the dynamic client), you already know the APIVersion (i.e. CoreV1()) and the Kind (i.e. List()).
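If you do want those fields filled in, one workaround (a sketch of my own, not part of the original answers, assuming client-go's scheme package) is to look the GroupVersionKind up from the scheme and set it on each item before printing:

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes/scheme"
)

// printWithGVK restores APIVersion/Kind from the client-go scheme, since the
// typed client strips them while decoding the response.
func printWithGVK(pods *corev1.PodList) {
	for i := range pods.Items {
		pod := &pods.Items[i]
		if gvks, _, err := scheme.Scheme.ObjectKinds(pod); err == nil && len(gvks) > 0 {
			pod.APIVersion, pod.Kind = gvks[0].ToAPIVersionAndKind()
		}
		fmt.Println("apiversion: ", pod.APIVersion, "kind: ", pod.Kind)
	}
}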

Unable to unmarshal array field in golang

I am trying to unmarshal the following Flux HelmRelease file.
apiVersion: helm.fluxcd.io/v1
kind: HelmRelease
metadata:
  annotations:
    fluxcd.io/automated: 'false'
    fluxcd.io/tag.ats: glob:*
  name: ats
  namespace: myns
spec:
  chart:
    git: git@github.com:reponame/project.git
    path: charts/path1/path1/myapp
    ref: master
  releaseName: foobar
  values:
    allowAllEgress: true
    recycleApp: true
    hooks:
      slackChannel: https://hooks.slack.com/services/something/somethingelse/
Here are my models
type HelmReleaseValues struct {
	AllowAllEgress bool `yaml:"allowAllEgress"`
	RecycleApp     bool `yaml:"recycleApp"`
	Hooks          `yaml:"hooks"`
}

type Hooks struct {
	SlackChannel string `yaml:"slackChannel"`
}

type Values struct {
	HelmReleaseValues `yaml:"values"`
	ReleaseName       string `yaml:"releaseName"`
	Chart             `yaml:"chart"`
}

type Spec struct {
	Values `yaml:"spec"`
}
The fields allowAllEgress and recycleApp are unmarshalled correctly.
However, the Hooks field in my struct turns out to be empty.
What am I doing wrong in the struct modelling / tagging?
edit: here is my code
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"github.com/davecgh/go-spew/spew"
	"gopkg.in/yaml.v3"
)

const ExitCodeCmdErr = 1

func main() {
	rawYaml := parseHelmReleaseFile("myfile.yaml")
	spew.Dump(rawYaml)
}

func parseHelmReleaseFile(fileName string) Spec {
	var v Spec

	yamlFile, err := ioutil.ReadFile(fileName)
	if err != nil {
		fmt.Printf("yaml file err #%v ", err)
		os.Exit(ExitCodeCmdErr)
	}

	err = yaml.Unmarshal(yamlFile, &v)
	if err != nil {
		fmt.Printf("Unmarshal: %v", err)
		os.Exit(ExitCodeCmdErr)
	}

	return v
}
I am running the program and grepping for the output (the actual helm release file is huge)
▶ go clean && gb .
~/Desktop/yamltutorial
./foobar | grep -i hooks -A 3
--
Hooks: (main.Hooks) {
SlackChannel: (string) ""
}
},
You did not have the Chart struct:
type Chart struct {
	Git  string `yaml:"git"`
	Path string `yaml:"path"`
	Ref  string `yaml:"ref"`
}
I added that and got the following output:
{Values:{HelmReleaseValues:{AllowAllEgress:true RecycleApp:true Hooks:{SlackChannel:https://hooks.slack.com/services/something/somethingelse/}} ReleaseName:foobar Chart:{Git:git@github.com:reponame/project.git Path:charts/path1/path1/myapp Ref:master}}}
Playground file with complete code.
https://play.golang.org/p/vCnjApr6gI9
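As a side note on the tagging question: with gopkg.in/yaml.v3, an embedded struct that carries an explicit yaml tag is mapped to a nested key rather than inlined, which is why the Hooks model itself is fine once the surrounding structs line up. A small self-contained sketch (not from the original answer) illustrating just that part:

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

type Hooks struct {
	SlackChannel string `yaml:"slackChannel"`
}

type HelmReleaseValues struct {
	AllowAllEgress bool `yaml:"allowAllEgress"`
	RecycleApp     bool `yaml:"recycleApp"`
	Hooks          `yaml:"hooks"`
}

func main() {
	doc := `
allowAllEgress: true
recycleApp: true
hooks:
  slackChannel: https://hooks.slack.com/services/something/somethingelse/
`
	var v HelmReleaseValues
	if err := yaml.Unmarshal([]byte(doc), &v); err != nil {
		panic(err)
	}
	// The embedded Hooks field is populated from the nested "hooks" key.
	fmt.Printf("%+v\n", v)
}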

Generic client.Get for custom Kubernetes GO operator

In a custom Kubernetes operator implemented with the operator-sdk in Go, is it possible to call the custom API directly and retrieve the object as YAML?
For example. I have a custom resource
apiVersion: test.com/v1alpha1
kind: TEST
metadata:
  name: example-test
spec:
  replicas: 3
  randomname: value
I don't know ahead of time what the fields in the spec are going to be, apart from replicas. So I am not able to create a Go type with structs to hold the entries.
So rather than doing:
instance := &testv1alpha1.Test{}
err := r.client.Get(context.TODO(), nameSpaceName, instance)
I want to be able to do something like:
instanceYAML := genericContainer{}
err := r.client.GetGeneric(context.TODO(), nameSpaceName, &instanceYAML)
and then parse the instanceYAML to check the entries.
This is called the "unstructured" client. The docs are pretty light so I recommend looking over the tests as examples https://github.com/kubernetes-sigs/controller-runtime/blob/ea32729106c995d9df310ac4731c2061490addfb/pkg/client/client_test.go#L1536-L1566
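For illustration, here is a minimal sketch of that idea (my own, assuming the group/version/kind from the manifest in the question and a controller-runtime client): fetch the resource as unstructured data and walk the spec without a typed struct.

import (
	"context"
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// printSpec fetches the TEST custom resource as unstructured data, so no typed
// struct is needed for the unknown spec fields.
func printSpec(c client.Client, key client.ObjectKey) error {
	u := &unstructured.Unstructured{}
	u.SetGroupVersionKind(schema.GroupVersionKind{
		Group:   "test.com",
		Version: "v1alpha1",
		Kind:    "TEST",
	})
	if err := c.Get(context.TODO(), key, u); err != nil {
		return err
	}
	spec, found, err := unstructured.NestedMap(u.Object, "spec")
	if err != nil || !found {
		return fmt.Errorf("spec not found: %v", err)
	}
	for k, v := range spec {
		fmt.Printf("%s: %v\n", k, v)
	}
	return nil
}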
Using the unstructured client, all the operations you can do on core API resources can also be done on a CustomResourceDefinition (CRD).
package util

import (
	"context"
	"encoding/json"
	"strconv"
	"github.com/go-logr/logr"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	types "k8s.io/apimachinery/pkg/types"
	_ "k8s.io/client-go/plugin/pkg/client/auth"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

var reqLogger logr.Logger

func CRUDonCRD(c client.Client) {
	u := createUnstructuredObject(c)
	GetCR(u, c)
	PatchCR(u, c, 1)
	DeleteCR(u, c)
}

func createUnstructuredObject(c client.Client) *unstructured.Unstructured {
	u := &unstructured.Unstructured{}
	u.SetGroupVersionKind(schema.GroupVersionKind{
		// Group:   "<crd group name>",
		// Kind:    "<crd kind>",
		// Version: "<crd version>",
		Group:   "com.cumulus.netq.operator.transport",
		Kind:    "TransportOperator",
		Version: "v1",
	})
	_ = c.Get(context.Background(), client.ObjectKey{
		// Namespace: "<namespace>",
		// Name:      "cr name",
		Namespace: "default",
		Name:      "transport-operator",
	}, u)
	return u
}

type patchStringValue struct {
	Op    string `json:"op"`
	Path  string `json:"path"`
	Value string `json:"value"`
}

func PatchCR(u *unstructured.Unstructured, c client.Client, replicaCount int) error {
	payload := []patchStringValue{{
		Op:    "replace",
		Path:  "/spec/replicas/replicasOpta",
		Value: strconv.Itoa(replicaCount),
	}}
	payloadBytes, _ := json.Marshal(payload)

	err := c.Patch(context.Background(), u, client.RawPatch(types.JSONPatchType, payloadBytes))
	if err != nil {
		reqLogger.Error(err, "error occurred while patching")
	}
	reqLogger.Info("Patching is successful", "Patched Transport Operator Object", u)
	return err
}

func GetCR(u *unstructured.Unstructured, c client.Client) {
	// key := client.ObjectKey{Namespace: "default", Name: "<cr-name>"}
	key := client.ObjectKey{Namespace: "default", Name: "transport-operator"}
	err := c.Get(context.Background(), key, u)
	if err != nil {
		reqLogger.Error(err, "error occurred while getting the resource")
	}
	reqLogger.Info("Got the resource", "Resource Object", u)
}

func DeleteCR(u *unstructured.Unstructured, c client.Client) {
	err := c.Delete(context.Background(), u)
	if err != nil {
		reqLogger.Error(err, "error occurred while deleting the resource")
	}
	reqLogger.Info("Resource deleted", "Resource ", u)
}
Here is what I did in the Operator SDK to get an object from Kubernetes.
Ref: https://github.com/kubernetes-sigs/controller-runtime/blob/master/pkg/client/example_test.go#L57
func (r *ConfigDeploymentReconciler) findObjectsForConfigMap(configMap client.Object) []reconcile.Request {
	pod := &corev1.Pod{}
	_ = r.Client.Get(context.Background(), client.ObjectKey{
		Namespace: "prometheus",
		Name:      "prometheus-node-exporter-7vxqs",
	}, pod)
	fmt.Println("Got this pod", pod.String())
	return nil
}

How do I write a pre/post traffic hook function in go?

I started using AWS SAM and for now I only have some unit tests, but I want to try to run integration tests in a pre traffic hook function.
Unfortunately there seems to be no code example for Go; all I could find was for JavaScript.
From that example I pieced together that I have to use the CodeDeploy SDK and call PutLifecycleEventHookExecutionStatus, but the specifics remain unclear. The AWS code example repo for Go has no examples for CodeDeploy either.
More information about the topic is available here: https://github.com/awslabs/serverless-application-model/blob/master/docs/safe_lambda_deployments.rst#pretraffic-posttraffic-hooks.
I want to start out by testing a lambda function that simply queries DynamoDB.
Something like this works:
package main

import (
	"context"

	"github.com/aws/aws-lambda-go/lambda"
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/codedeploy"
)

type CodeDeployEvent struct {
	DeploymentId                  string `json:"deploymentId"`
	LifecycleEventHookExecutionId string `json:"lifecycleEventHookExecutionId"`
}

func HandleRequest(ctx context.Context, event CodeDeployEvent) (string, error) {
	// add some tests here and change status flag as needed . . .
	client := codedeploy.New(session.New())
	params := &codedeploy.PutLifecycleEventHookExecutionStatusInput{
		DeploymentId:                  &event.DeploymentId,
		LifecycleEventHookExecutionId: &event.LifecycleEventHookExecutionId,
		// Status is a *string in the SDK, hence aws.String here.
		Status: aws.String(codedeploy.LifecycleEventStatusSucceeded),
	}
	req, _ := client.PutLifecycleEventHookExecutionStatusRequest(params)
	return *params.Status, req.Send()
}

func main() {
	lambda.Start(HandleRequest)
}
I got around to implementing this and want to share my complete solution.
After figuring out how to use it, I decided against it, because there are a couple of drawbacks:
- there is no way to expose the new canary version to only a dedicated portion of the user base, which means users will sometimes hit the new version and sometimes the old one
- invoking functions that publish to SNS will trigger all downstream actions, which might hit the new or the old version of the downstream services; this would cause a lot of problems in case of breaking APIs
- IAM changes affect both versions immediately, possibly breaking the old version
Instead, I deploy everything to a pre-prod account, run my integration and e2e tests there, and if they succeed I deploy to prod.
The CDK code to create a canary deployment:
const versionAlias = new lambda.Alias(this, 'Alias', {
  aliasName: "alias",
  version: this.lambda.currentVersion,
})

const preHook = new lambda.Function(this, 'LambdaPreHook', {
  description: "pre hook",
  code: lambda.Code.fromAsset('dist/upload/convert-pre-hook'),
  handler: 'main',
  runtime: lambda.Runtime.GO_1_X,
  memorySize: 128,
  timeout: cdk.Duration.minutes(1),
  environment: {
    FUNCTION_NAME: this.lambda.currentVersion.functionName,
  },
  reservedConcurrentExecutions: 5,
  logRetention: RetentionDays.ONE_WEEK,
})
// this.lambda.grantInvoke(preHook) // this doesn't work, I need to grant invoke to all functions :s
preHook.addToRolePolicy(new iam.PolicyStatement({
  actions: [
    "lambda:InvokeFunction",
  ],
  resources: ["*"],
  effect: iam.Effect.ALLOW,
}))

const application = new codedeploy.LambdaApplication(this, 'CodeDeployApplication')
new codedeploy.LambdaDeploymentGroup(this, 'CanaryDeployment', {
  application: application,
  alias: versionAlias,
  deploymentConfig: codedeploy.LambdaDeploymentConfig.ALL_AT_ONCE,
  preHook: preHook,
  autoRollback: {
    failedDeployment: true,
    stoppedDeployment: true,
    deploymentInAlarm: false,
  },
  ignorePollAlarmsFailure: false,
  // alarms:
  // autoRollback: codedeploy.A
  // postHook:
})
Here is my Go code for the pre-hook function. PutLifecycleEventHookExecutionStatus tells CodeDeploy whether the pre hook succeeded or not. Unfortunately, if you fail the deployment, the message you get in the cdk deploy output is utterly useless, so you need to check the pre/post hook logs.
In order to actually run the integration test I simply invoke the lambda and check if an error occurred.
package main

import (
	"encoding/base64"
	"fmt"
	"log"
	"os"
	"github.com/aws/aws-lambda-go/lambda"
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/codedeploy"
	lambdaService "github.com/aws/aws-sdk-go/service/lambda"
)

var svc *codedeploy.CodeDeploy
var lambdaSvc *lambdaService.Lambda

type codeDeployEvent struct {
	DeploymentId                  string `json:"deploymentId"`
	LifecycleEventHookExecutionId string `json:"lifecycleEventHookExecutionId"`
}

func handler(e codeDeployEvent) error {
	params := &codedeploy.PutLifecycleEventHookExecutionStatusInput{
		DeploymentId:                  &e.DeploymentId,
		LifecycleEventHookExecutionId: &e.LifecycleEventHookExecutionId,
	}

	err := handle()
	if err != nil {
		log.Println(err)
		params.Status = aws.String(codedeploy.LifecycleEventStatusFailed)
	} else {
		params.Status = aws.String(codedeploy.LifecycleEventStatusSucceeded)
	}

	_, err = svc.PutLifecycleEventHookExecutionStatus(params)
	if err != nil {
		return fmt.Errorf("failed putting the lifecycle event hook execution status. the status was %s", *params.Status)
	}
	return nil
}

func handle() error {
	functionName := os.Getenv("FUNCTION_NAME")
	if functionName == "" {
		return fmt.Errorf("FUNCTION_NAME not set")
	}
	log.Printf("function name: %s", functionName)

	// invoke lambda via sdk
	input := &lambdaService.InvokeInput{
		FunctionName:   &functionName,
		Payload:        nil,
		LogType:        aws.String(lambdaService.LogTypeTail),                   // returns the log in the response
		InvocationType: aws.String(lambdaService.InvocationTypeRequestResponse), // synchronous - default
	}
	err := input.Validate()
	if err != nil {
		return fmt.Errorf("validating the input failed: %v", err)
	}

	resp, err := lambdaSvc.Invoke(input)
	if err != nil {
		return fmt.Errorf("failed to invoke lambda: %v", err)
	}

	decodeString, err := base64.StdEncoding.DecodeString(*resp.LogResult)
	if err != nil {
		return fmt.Errorf("failed to decode the log: %v", err)
	}
	log.Printf("log result: %s", decodeString)

	if resp.FunctionError != nil {
		return fmt.Errorf("lambda was invoked but returned error: %s", *resp.FunctionError)
	}
	return nil
}

func main() {
	sess, err := session.NewSession()
	if err != nil {
		return
	}
	svc = codedeploy.New(sess)
	lambdaSvc = lambdaService.New(sess)

	lambda.Start(handler)
}

OpenShift API - cannot use config

I am trying to connect to an OpenShift/K8s cluster from inside a running pod via the Go API. Therefore, I am following the tutorial from here.
Currently I have a problem with creating the OpenShift build client, whose constructor takes a previously created rest.InClusterConfig() as an argument. This should work, since it is shown in the example, but I get this error:
cannot use restconfig (type *"k8s.io/client-go/rest".Config) as type *"github.com/openshift/client-go/vendor/k8s.io/client-go/rest".Config in argument to "github.com/openshift/client-go/build/clientset/versioned/typed/build/v1".NewForConfig
I am a little confused, since rest.InClusterConfig() returns a *Config. This is accepted by corev1client.NewForConfig(), which expects a *rest.Config. But buildv1client.NewForConfig() also expects a *rest.Config - just not exactly the restconfig I am creating with rest.InClusterConfig().
Where is my mistake? Bonus points for: I am taking my first steps with the API, and all it should do is generate a second pod from an image with some parameters applied. Do I need the buildv1client client? This is pretty much Kubernetes core functionality.
The problem is that the package exists both in the vendored folder under vendor/ and on your $GOPATH, so the compiler treats them as two distinct rest.Config types. Vendoring "github.com/openshift/client-go" consistently should solve your problem.
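Once the dependency tree is consistent, a minimal sketch (my own, based only on the packages named in the question) of constructing both clients from the same in-cluster config looks like this:

package main

import (
	buildv1client "github.com/openshift/client-go/build/clientset/versioned/typed/build/v1"
	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/rest"
)

func main() {
	// With consistent vendoring, *rest.Config is the same type for both
	// client constructors.
	restconfig, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	coreclient, err := corev1client.NewForConfig(restconfig)
	if err != nil {
		panic(err)
	}
	buildclient, err := buildv1client.NewForConfig(restconfig)
	if err != nil {
		panic(err)
	}
	_, _ = coreclient, buildclient
}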
To answer your second question: for the use case you have described, not really. If you want to create an OpenShift Build then yes, you need the build client, as this API object does not exist in Kubernetes. If you simply want to create a Pod then you don't need it. A simple example for the API reference might look as follows:
package main

import (
	"k8s.io/api/core/v1"
	"k8s.io/client-go/tools/clientcmd"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
)

func main() {
	kubeconfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		clientcmd.NewDefaultClientConfigLoadingRules(),
		&clientcmd.ConfigOverrides{},
	)

	namespace, _, err := kubeconfig.Namespace()
	if err != nil {
		panic(err)
	}

	restconfig, err := kubeconfig.ClientConfig()
	if err != nil {
		panic(err)
	}

	coreclient, err := corev1client.NewForConfig(restconfig)
	if err != nil {
		panic(err)
	}

	_, err = coreclient.Pods(namespace).Create(&v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: "example",
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:    "ubuntu",
					Image:   "ubuntu:trusty",
					Command: []string{"echo"},
					Args:    []string{"Hello World"},
				},
			},
		},
	})
	if err != nil {
		panic(err)
	}
}
