In a custom Kubernetes operator implemented with the operator-sdk in golang is it possible to call the custom API directly and retrieve the object as YAML?
For example. I have a custom resource
apiVersion: test.com/v1alpha1
kind: TEST
metadata:
name: example-test
spec:
replicas: 3
randomname: value
I don't know ahead of time what the fields in the spec are going to be apart from replicas. So I am not able to create a go type that includes structs to hold the entries.
So rather than doing:
instance := &testv1alpha1.Test{}
err := r.client.Get(context.TODO(), nameSpaceName, instance)
I want to be able to do something like:
instanceYAML := genericContainer{}
err := r.client.GetGeneric(context.TODO(), nameSpaceName, instanceYAML)
and then parse the instanceYAML to check the entries.
This is called the "unstructured" client. The docs are pretty light so I recommend looking over the tests as examples https://github.com/kubernetes-sigs/controller-runtime/blob/ea32729106c995d9df310ac4731c2061490addfb/pkg/client/client_test.go#L1536-L1566
Using the unstructured client, all the operations you can perform on core API resources can also be performed on custom resources defined by a CustomResourceDefinition (CRD).
package util
import (
"context"
"encoding/json"
"strconv"
"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
types "k8s.io/apimachinery/pkg/types"
_ "k8s.io/client-go/plugin/pkg/client/auth"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// reqLogger is the package-level logger shared by the CRUD helpers below.
// NOTE(review): it is never initialised in this file — depending on the logr
// version, calling methods on the zero value may panic; confirm it is
// assigned elsewhere before these helpers run.
var reqLogger logr.Logger

// CRUDonCRD demonstrates a Get, Patch, and Delete cycle against a custom
// resource using the unstructured client.
func CRUDonCRD(c client.Client) {
	u := createUnstructuredObject(c)
	GetCR(u, c)
	PatchCR(u, c, 1)
	DeleteCR(u, c)
}
// createUnstructuredObject builds an Unstructured handle carrying the
// TransportOperator GroupVersionKind and populates it with a live Get of the
// "transport-operator" CR in the default namespace. On a failed Get the
// returned object still carries the GVK but has no content.
func createUnstructuredObject(c client.Client) *unstructured.Unstructured {
	u := &unstructured.Unstructured{}
	u.SetGroupVersionKind(schema.GroupVersionKind{
		// Group: "<crd group name>",
		// Kind: "<crd kind>",
		// Version: "<crd version>",
		Group:   "com.cumulus.netq.operator.transport",
		Kind:    "TransportOperator",
		Version: "v1",
	})
	// The original discarded this error entirely; surface it so a missing CR
	// is visible in the logs (message matches the sibling helpers).
	if err := c.Get(context.Background(), client.ObjectKey{
		// Namespace: "<namespace>",
		// Name: "cr name",
		Namespace: "default",
		Name:      "transport-operator",
	}, u); err != nil {
		reqLogger.Error(err, "error occured while getting the resource")
	}
	return u
}
// patchStringValue is a single JSON Patch (RFC 6902) operation whose value is
// a string; a slice of these is marshalled into the RawPatch payload used by
// PatchCR.
type patchStringValue struct {
	Op    string `json:"op"`
	Path  string `json:"path"`
	Value string `json:"value"`
}
// PatchCR applies a JSON patch replacing /spec/replicas/replicasOpta on the
// custom resource with the given replica count (sent as a string value).
// It returns the first error encountered, or nil on success.
func PatchCR(u *unstructured.Unstructured, c client.Client, replicaCount int) error {
	payload := []patchStringValue{{
		Op:    "replace",
		Path:  "/spec/replicas/replicasOpta",
		Value: strconv.Itoa(replicaCount),
	}}
	// The original ignored the marshal error and would have sent a nil patch.
	payloadBytes, err := json.Marshal(payload)
	if err != nil {
		reqLogger.Error(err, "error occured while patching")
		return err
	}
	if err := c.Patch(context.Background(), u, client.RawPatch(types.JSONPatchType, payloadBytes)); err != nil {
		reqLogger.Error(err, "error occured while patching")
		return err
	}
	// Only log success on the success path — the original logged this line
	// unconditionally, even after a failed Patch.
	reqLogger.Info("Patching is successful", "Patched Transport Operator Object", u)
	return nil
}
// GetCR fetches the "transport-operator" CR from the default namespace into
// the supplied unstructured object, logging the outcome.
func GetCR(u *unstructured.Unstructured, c client.Client) {
	// key := client.ObjectKey{Namespace: "default", Name: "<cr-name>"}
	key := client.ObjectKey{Namespace: "default", Name: "transport-operator"}
	if err := c.Get(context.Background(), key, u); err != nil {
		reqLogger.Error(err, "error occured while getting the resource")
		// Early return: the original fell through and logged "Got the
		// resource" even when the Get failed.
		return
	}
	reqLogger.Info("Got the resource", "Resource Object", u)
}
// DeleteCR deletes the custom resource represented by u, logging the outcome.
func DeleteCR(u *unstructured.Unstructured, c client.Client) {
	if err := c.Delete(context.Background(), u); err != nil {
		reqLogger.Error(err, "error occured while deleting the resource")
		// Early return: the original logged "Resource deleted" even when the
		// Delete failed.
		return
	}
	reqLogger.Info("Resource deleted", "Resource ", u)
}
Here is what I did in the Operator SDK to get an object from Kubernetes.
Ref: https://github.com/kubernetes-sigs/controller-runtime/blob/master/pkg/client/example_test.go#L57
// findObjectsForConfigMap is a watch-mapping handler: it demonstrates fetching
// an arbitrary object (a fixed Pod in the prometheus namespace) via the
// reconciler's client, then returns no reconcile requests.
func (r *ConfigDeploymentReconciler) findObjectsForConfigMap(configMap client.Object) []reconcile.Request {
	pod := &corev1.Pod{}
	err := r.Client.Get(context.Background(), client.ObjectKey{
		Namespace: "prometheus",
		Name:      "prometheus-node-exporter-7vxqs",
	}, pod)
	if err != nil {
		// The original discarded this error and printed an empty Pod.
		fmt.Println("failed to get pod:", err)
		return nil
	}
	fmt.Println("Got this pod", pod.String())
	return nil
}
Related
Following the k8s/controller-runtime/client example code (see here), which goes a bit like this
// c is declared but never initialised — the zero value of an interface is
// nil, which is exactly what causes the nil dereference demonstrated below.
var c client.Client

func main() {
	// Using a typed object.
	pod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "namespace",
			Name:      "name",
		},
		Spec: corev1.PodSpec{
			Containers: []corev1.Container{
				{
					Image: "nginx",
					Name:  "nginx",
				},
			},
		},
	}
	// c is a created client.
	// NOTE(review): the comment above is misleading — c was never actually
	// created, so calling a method on the nil client panics.
	_ = c.Create(context.Background(), pod) // nil deref here
}
I get a nullptr dereference on _ = c.Create(context.Background(), pod). To me this makes sense, since I declared c, but never initialised it. However the example code also does that. What is going on here?
The correct way to initialise the client can be found here: https://pkg.go.dev/sigs.k8s.io/controller-runtime#v0.14.4/pkg/client#example-New
// Initialise the client from the ambient configuration (kubeconfig or
// in-cluster); config.GetConfigOrDie terminates the process if none is found.
cl, err := client.New(config.GetConfigOrDie(), client.Options{})
if err != nil {
	fmt.Println("failed to create client")
	os.Exit(1)
}
I would like the following yaml
kind: bar
name: baryaml
to be unmarshaled inside the struct Resource
// Kind is an enum decoded from the YAML "kind" string field.
type Kind int

const (
	KIND_FOO Kind = iota // decoded from "foo"
	KIND_BAR             // decoded from "bar"
)

// Resource is the unmarshal target; Kind relies on a custom UnmarshalYAML
// defined on *Kind to map the string form onto the enum.
type Resource struct {
	Kind Kind
	Name string
}
Could someone explain why the code below is unable to store the correct kind, even though it is being unmarshaled correctly?
# Output:
Unmarshaled kind: 1
yamlBar: {0 baryaml}
# Expected Output:
Unmarshaled kind: 1
yamlBar: {1 baryaml}
package main
import (
"fmt"
"gopkg.in/yaml.v3"
)
// Kind is a small enum decoded from the YAML strings "foo" / "bar".
type Kind int

const (
	KIND_FOO Kind = iota
	KIND_BAR
)
// UnmarshalYAML decodes a YAML scalar into a Kind.
// BUG (the subject of this question): `k = &x` below only reassigns the
// local copy of the receiver pointer; it never writes through to the
// caller's Kind, so Resource.Kind remains the zero value. The fix is to
// assign through the pointer: *k = x.
func (k *Kind) UnmarshalYAML(value *yaml.Node) error {
	var kind string
	err := value.Decode(&kind)
	if err != nil {
		return err
	}
	var x Kind
	switch kind {
	case "foo":
		x = KIND_FOO
	case "bar":
		x = KIND_BAR
	default:
		return fmt.Errorf("unknown kind: %s", kind)
	}
	// Prints 1 because the local pointer now aims at x — but the value the
	// caller passed in is untouched.
	k = &x
	fmt.Println("Unmarshaled kind:", *k)
	return nil
}
// Resource is populated from YAML; its Kind field goes through the custom
// unmarshaler on *Kind.
type Resource struct {
	Kind Kind
	Name string
}
// main decodes a two-field YAML document into a Resource and prints the
// result, panicking on any decode error.
func main() {
	var yamlBar = `
kind: bar
name: baryaml
`
	var res Resource
	if err := yaml.Unmarshal([]byte(yamlBar), &res); err != nil {
		panic(err)
	}
	fmt.Println("yamlBar:", res)
}
Thanks to #JimB for suggesting to dereference the k pointer:
// UnmarshalYAML decodes a YAML scalar ("foo" or "bar") into a Kind, writing
// the result through the receiver pointer so the caller's value is updated.
func (k *Kind) UnmarshalYAML(value *yaml.Node) error {
	var raw string
	if err := value.Decode(&raw); err != nil {
		return err
	}
	lookup := map[string]Kind{
		"foo": KIND_FOO,
		"bar": KIND_BAR,
	}
	v, ok := lookup[raw]
	if !ok {
		return fmt.Errorf("unknown kind: %s", raw)
	}
	*k = v
	fmt.Println("Unmarshaled kind:", *k)
	return nil
}
I am trying to mock MySQL, but I get the error: "because: there are no expected calls of the method "Pod" for that receiver."
I confirmed that I have generated the Pod method with the Mockgen tool,
Below is my code
// TestPodService_Create drives PodService.Create through a gomock-backed
// store factory.
// NOTE(review): an expectation is registered on mockPod.Create, but
// mockFactory is never stubbed to return mockPod (there is no
// mockFactory.EXPECT().Pod(...) call) — which matches the reported runtime
// error that no calls of the method "Pod" are expected; confirm against the
// generated mock.
func TestPodService_Create(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	mockFactory := store.NewMockFactory(ctrl)
	mockPod := store.NewMockPodStore(ctrl)
	pods := fake.FakePod(10)
	// Expect exactly one Create call with the first fake pod.
	mockPod.EXPECT().Create(gomock.Eq(context.TODO()), gomock.Eq(pods[0])).Return(nil)
	// fields/args mirror the dependencies and inputs of the service under test.
	type fields struct {
		store    store.Factory
		redisCli redis.RedisCli
	}
	type args struct {
		ctx context.Context
		pod *model.Pod
	}
	tests := []struct {
		name    string
		fields  fields
		args    args
		wantErr bool
	}{
		// TODO: Add test cases.
		{
			name:   "test case 1",
			fields: fields{store: mockFactory,},
			args: args{
				ctx: context.TODO(),
				pod: &pods[0],
			},
			wantErr: false,
		},
	}
	for _, tt := range tests {
		fmt.Printf("begin to test\n")
		podService := &PodService{store: tt.fields.store}
		err := podService.Create(tt.args.ctx, tt.args.pod)
		assert.Equal(t, tt.wantErr, err!=nil)
	}
}
You need to include this line in your TestPodService_Create():
mockPod.EXPECT().Pod(gomock.Any()).AnyTimes()
Adjust the gomock.Any() and .AnyTimes() for your desired goals.
I am trying to create multiple instances in GCP with a Cloud Function, written in Go.
I refer tutorial in https://medium.com/google-cloud/using-cloud-scheduler-and-cloud-functions-to-deploy-a-periodic-compute-engine-vm-worker-2b897ef68dc5 then write some customize in my context. Here is my code
package cloudfunctions
import (
"context"
"fmt"
"log"
"net/http"
"os"
"google.golang.org/api/compute/v1"
)
// Deployment parameters. ProjectID/Zone/Region are populated per request from
// environment variables in DeployInstance; InstanceName and InstanceType are
// rewritten on every loop iteration.
// NOTE(review): mutable package-level state shared across requests is racy if
// the function ever handles concurrent invocations in one process.
var ProjectID = ""
var Zone = ""
var Region = ""
var InstanceName = ""
var InstanceType = ""
// DeployInstance is the HTTP entry point: for each machine type in
// listInstance it checks whether a "benchmark-<type>" instance exists and, on
// a failed lookup (treated as "does not exist yet"), attempts to create and
// start it.
//
// NOTE(review): log.Fatal terminates the whole process — in Cloud Functions
// this surfaces as "Infrastructure cannot communicate with function".
// WriteHeader is also called on multiple loop iterations; only the first call
// takes effect.
func DeployInstance(w http.ResponseWriter, r *http.Request) {
	ProjectID = os.Getenv("PROJECT_ID")
	Zone = os.Getenv("ZONE")
	Region = os.Getenv("REGION")
	cs, err := InitComputeService()
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		log.Fatal(err)
	}
	var listInstance = []string{"e2-standard-2", "e2-standard-8", "n2-standard-2", "n2-standard-8", "n1-custom-2-8192", "n1-custom-8-32768", "c2-standard-8" }
	for i:=0; i < 7; i++ {
		InstanceType = listInstance[i]
		InstanceName = "benchmark-"+InstanceType
		instance, err := GetInstance(cs)
		if err != nil {
			w.WriteHeader(http.StatusTemporaryRedirect)
			w.Write([]byte(err.Error() + " instance may not exist yet"))
			log.Print(err)
			_, err = CreateInstance(cs)
			// NOTE(review): startInstance runs only when CreateInstance
			// returned an error — confirm this inversion is intentional.
			if err != nil {
				w.WriteHeader(http.StatusInternalServerError)
				w.Write([]byte("creating instance " + InstanceName + "in zone: " + Zone))
				startInstance(cs, w)
			}
		} else {
			msg := "instance is in intermediate state: " + instance.Status
			w.WriteHeader(http.StatusAccepted)
			w.Write([]byte(msg))
			log.Println(msg)
		}
	}
}
// InitComputeService constructs a Compute Engine API client using the
// background context and the environment's default credentials.
func InitComputeService() (*compute.Service, error) {
	return compute.NewService(context.Background())
}
// GetInstance looks up the VM named by the package-level
// ProjectID/Zone/InstanceName globals.
func GetInstance(computeService *compute.Service) (*compute.Instance, error) {
	return computeService.Instances.Get(ProjectID, Zone, InstanceName).Do()
}

// StartInstance issues a start operation for that same VM.
func StartInstance(computeService *compute.Service) (*compute.Operation, error) {
	return computeService.Instances.Start(ProjectID, Zone, InstanceName).Do()
}
// CreateInstance inserts a preemptible CentOS 7 VM named by the package-level
// globals, with a pd-ssd boot disk and an external NAT address on the default
// subnetwork.
//
// NOTE(review): DiskName is hard-coded to "worker-instance-boot-disk", so a
// second CreateInstance call collides on the disk name — per the follow-up
// answer this is the root cause of only one instance being created.
func CreateInstance(computeService *compute.Service) (*compute.Operation, error) {
	instance := &compute.Instance{
		Name:        InstanceName,
		MachineType: fmt.Sprintf("zones/%s/machineTypes/%s", Zone, InstanceType),
		NetworkInterfaces: []*compute.NetworkInterface{
			{
				Name:       "default",
				Subnetwork: fmt.Sprintf("projects/%s/regions/%s/subnetworks/default", ProjectID, Region),
				AccessConfigs: []*compute.AccessConfig{
					{
						Name:        "External NAT",
						Type:        "ONE_TO_ONE_NAT",
						NetworkTier: "PREMIUM",
					},
				},
			},
		},
		Scheduling: &compute.Scheduling{
			Preemptible: true,
		},
		Disks: []*compute.AttachedDisk{
			{
				Boot:       true, // The first disk must be a boot disk.
				AutoDelete: true, //Optional
				Mode:       "READ_WRITE", //Mode should be READ_WRITE or READ_ONLY
				Interface:  "SCSI", //SCSI or NVME - NVME only for SSDs
				InitializeParams: &compute.AttachedDiskInitializeParams{
					// Shared across all instances — see NOTE above.
					DiskName:    "worker-instance-boot-disk",
					SourceImage: "projects/centos-cloud/global/images/family/centos-7",
					DiskType:    fmt.Sprintf("projects/%s/zones/%s/diskTypes/pd-ssd", ProjectID, Zone),
					DiskSizeGb:  200,
				},
			},
		},
	}
	return computeService.Instances.Insert(ProjectID, Zone, instance).Do()
}
// startInstance starts the configured instance and writes the resulting
// operation JSON to the HTTP response.
// NOTE(review): log.Fatal exits the whole process on error — in a Cloud
// Function this kills the runtime mid-request; the MarshalJSON error is also
// silently discarded.
func startInstance(cs *compute.Service, w http.ResponseWriter) {
	operation, err := StartInstance(cs)
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		log.Fatal(err)
	}
	w.WriteHeader(http.StatusOK)
	data, _ := operation.MarshalJSON()
	w.Write(data)
}
In above code, I want to create 7 instance with 7 difference setting, specific is instance type and instance name. I test this code in cloud function with DeployInstance is start function. But there is only one instance was created, with name is benchmark-e2-standard-2 and type is e2-standard-2. It output an error with message Error: Infrastructure cannot communicate with function. There was likely a crash or deadlock in the user-provided code. Additional troubleshooting documentation can be found at https://cloud.google.com/functions/docs/troubleshooting#logging
I visited the website but did not find a solution. Can anyone explain why my code is not working and how to fix it? Step by step if possible.
Thanks in advance.
I found my answer. The root cause is that each instance must have its own boot disk, with a unique name.
So I changed my code accordingly; you can see the changes below.
package cloudfunctions
import (
"context"
"fmt"
"log"
"net/http"
"os"
"google.golang.org/api/compute/v1"
"time"
)
// Deployment parameters. ProjectID/Zone/Region come from environment
// variables in DeployInstance; InstanceName, InstanceType, and IDiskName (the
// per-instance boot-disk name — the fix for the disk-name collision) are
// rewritten on every loop iteration.
// NOTE(review): mutable package-level state shared across requests is racy if
// the function ever handles concurrent invocations in one process.
var ProjectID = ""
var Zone = ""
var Region = ""
var InstanceName = ""
var InstanceType = ""
var IDiskName = ""
// DeployInstance is the Cloud Function entry point: for each machine type in
// listInstance it ensures a "benchmark-<type>" instance exists. When the
// lookup fails (treated as "does not exist yet") it creates the instance,
// polls its boot disk, and then starts it.
func DeployInstance(w http.ResponseWriter, r *http.Request) {
	ProjectID = os.Getenv("PROJECT_ID")
	Zone = os.Getenv("ZONE")
	Region = os.Getenv("REGION")
	// Build the API client once instead of once per loop iteration.
	cs, err := compute.NewService(context.Background())
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		// log.Fatal here would abort the whole function process ("Infrastructure
		// cannot communicate with function"); log and return instead.
		log.Print(err)
		return
	}
	var listInstance = []string{"e2-standard-8", "e2-standard-2", "n2-standard-2", "n2-standard-8", "n1-custom-2-8192", "n1-custom-8-32768", "c2-standard-8"}
	for i := 0; i < len(listInstance); i++ {
		InstanceType = listInstance[i]
		InstanceName = "benchmark-" + InstanceType
		IDiskName = InstanceName + "-boot-disk"
		instance, err := GetInstance(cs)
		if err != nil {
			w.WriteHeader(http.StatusTemporaryRedirect)
			w.Write([]byte(err.Error() + " instance may not exist yet"))
			_, err = CreateInstance(cs)
			// NOTE(review): the original also only entered this branch when
			// CreateInstance returned an error — preserved as-is; confirm the
			// inversion is intentional.
			if err != nil {
				for {
					disk, derr := cs.Disks.Get(ProjectID, Zone, IDiskName).Context(context.Background()).Do()
					if derr != nil {
						// BUGFIX: the original read disk.Status before checking
						// derr, dereferencing a nil *compute.Disk and crashing
						// the function. Check the error first.
						startInstance(cs, w)
						break
					}
					log.Print(IDiskName + " is " + disk.Status)
					time.Sleep(1 * time.Second)
				}
			}
		} else {
			msg := "instance " + InstanceName + " is in intermediate state: " + instance.Status
			w.WriteHeader(http.StatusAccepted)
			w.Write([]byte(msg))
			log.Println(msg)
		}
	}
}
// GetInstance looks up the VM named by the package-level
// ProjectID/Zone/InstanceName globals.
func GetInstance(computeService *compute.Service) (*compute.Instance, error) {
	return computeService.Instances.Get(ProjectID, Zone, InstanceName).Do()
}

// StartInstance issues a start operation for that same VM.
func StartInstance(computeService *compute.Service) (*compute.Operation, error) {
	return computeService.Instances.Start(ProjectID, Zone, InstanceName).Do()
}
// CreateInstance inserts a preemptible CentOS 7 VM named by the package-level
// globals, with a 100 GB pd-ssd boot disk (uniquely named via IDiskName) and
// an external NAT address on the default subnetwork.
func CreateInstance(computeService *compute.Service) (*compute.Operation, error) {
	bootDisk := &compute.AttachedDisk{
		Boot:       true,         // The first disk must be a boot disk.
		AutoDelete: true,         // Optional
		Mode:       "READ_WRITE", // Mode should be READ_WRITE or READ_ONLY
		Interface:  "SCSI",       // SCSI or NVME - NVME only for SSDs
		InitializeParams: &compute.AttachedDiskInitializeParams{
			DiskName:    IDiskName,
			SourceImage: "projects/centos-cloud/global/images/family/centos-7",
			DiskType:    fmt.Sprintf("projects/%s/zones/%s/diskTypes/pd-ssd", ProjectID, Zone),
			DiskSizeGb:  100,
		},
	}
	nic := &compute.NetworkInterface{
		Name:       "default",
		Subnetwork: fmt.Sprintf("projects/%s/regions/%s/subnetworks/default", ProjectID, Region),
		AccessConfigs: []*compute.AccessConfig{{
			Name:        "External NAT",
			Type:        "ONE_TO_ONE_NAT",
			NetworkTier: "PREMIUM",
		}},
	}
	instance := &compute.Instance{
		Name:              InstanceName,
		MachineType:       fmt.Sprintf("zones/%s/machineTypes/%s", Zone, InstanceType),
		NetworkInterfaces: []*compute.NetworkInterface{nic},
		Scheduling:        &compute.Scheduling{Preemptible: true},
		Disks:             []*compute.AttachedDisk{bootDisk},
	}
	return computeService.Instances.Insert(ProjectID, Zone, instance).Do()
}
// startInstance starts the configured instance and writes the resulting
// operation JSON to the HTTP response. Errors are logged and reported as 500
// without killing the process.
func startInstance(cs *compute.Service, w http.ResponseWriter) {
	operation, err := StartInstance(cs)
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		// log.Fatal here terminated the whole Cloud Function process
		// ("Infrastructure cannot communicate with function"); log and return.
		log.Print(err)
		return
	}
	// The marshal error was previously discarded with _.
	data, err := operation.MarshalJSON()
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		log.Print(err)
		return
	}
	w.WriteHeader(http.StatusOK)
	w.Write(data)
}
If you have any question about this problem, drop your comment. I hope I can support for you.
Thanks all.
I am working on adding private data into the Hyperledger using Fabric SDK Go but getting error while invoking the data.
Instantiate Chaincode
// Build the chaincode endorsement policy: members of both Org1 and Org2 must sign.
// NOTE(review): err is never checked here, and the InstantiateCC
// response/error is discarded — this snippet shows the request shape only.
ccPolicy, err := cauthdsl.FromString("AND ('Org1MSP.member','Org2MSP.member')")
resMgmt.InstantiateCC(
	setup.Org.ChannelID,
	resmgmt.InstantiateCCRequest{
		Name:    chaincodeId,
		Path:    setup.Org.ChaincodePath,
		Version: chaincodeVersion,
		Args:    [][]byte{[]byte("init")},
		Policy:  ccPolicy,
		// No CollConfig is supplied here — this is what triggers the
		// "collection config not defined" error below.
	}, resmgmt.WithRetry(retry.DefaultResMgmtOpts))
collections_config.json
[
{
"name": "collectionMedium",
"policy": "AND ('Org1MSP.member', 'Org2MSP.member')",
"requiredPeerCount": 0,
"maxPeerCount": 3,
"blockToLive":1000000
},
{
"name": "collectionPrivate",
"policy": "OR('Org2MSP.member')",
"requiredPeerCount": 0,
"maxPeerCount": 3,
"blockToLive":5
}
]
Invoke
// Marshal the product and write it into the "collectionMedium" private data
// collection, keyed by id.
product := &model.Product{id, name, color, length, width}
productBytes, err1 := json.Marshal(product)
if err1 != nil {
	return shim.Error(err1.Error())
}
// PUT_STATE fails here unless the collection config was supplied when the
// chaincode was instantiated (the error discussed below).
err2 := stub.PutPrivateData("collectionMedium", id, productBytes)
if err2 != nil {
	return shim.Error(err2.Error())
}
Error
Chaincode status Code: (500) UNKNOWN. Description: PUT_STATE failed: collection config not defined for chaincode [CC_ORG_V00], pass the collection configuration upon chaincode definition/instantiation
So it says the collection config was not defined during instantiation of the chaincode, but I don't know exactly how to add a collection config to the chaincode instantiation request.
Please suggest a solution.
I am able to create CollConfig requests in following manner and able to add collection config into my chaincode instantiation.
My Solution
#CollConfig 1
// Collection 1: "collectionMedium", policy allowing Org1 or Org2 members,
// requiredPeerCount 0, maximumPeerCount 3, blockToLive 1000.
var collCfg1RequiredPeerCount, collCfg1MaximumPeerCount int32
var collCfg1BlockToLive uint64
collCfg1Name := "collectionMedium"
collCfg1BlockToLive = 1000
collCfg1RequiredPeerCount = 0
collCfg1MaximumPeerCount = 3
collCfg1Policy := "OR('Org1MSP.member','Org2MSP.member')"
collCfg1, err := newCollectionConfig(collCfg1Name,collCfg1Policy, collCfg1RequiredPeerCount,collCfg1MaximumPeerCount,collCfg1BlockToLive)
if err != nil {
	return errors.WithMessage(err, "failed to create collection config 1")
}
#CollConfig 2
// Collection 2: "collectionPrivate", restricted to Org2 members,
// requiredPeerCount 0, maximumPeerCount 3, blockToLive 100.
var collCfg2RequiredPeerCount, collCfg2MaximumPeerCount int32
var collCfg2BlockToLive uint64
collCfg2Name := "collectionPrivate"
collCfg2BlockToLive = 100
collCfg2RequiredPeerCount = 0
collCfg2MaximumPeerCount = 3
collCfg2Policy := "OR('Org2MSP.member')"
collCfg2, err := newCollectionConfig(collCfg2Name, collCfg2Policy, collCfg2RequiredPeerCount, collCfg2MaximumPeerCount, collCfg2BlockToLive)
if err != nil {
	// Was "collection config 1" — copy-paste error in the message.
	return errors.WithMessage(err, "failed to create collection config 2")
}
# Instantiate Chaincode
// Attach both collection configs to the instantiate request via CollConfig —
// this is the missing piece that fixes the "collection config not defined"
// error.
cfg := []*cb.CollectionConfig{collCfg1,collCfg2}
resp, err := resMgmt.InstantiateCC(
	setup.Org.ChannelID,
	resmgmt.InstantiateCCRequest{
		Name:       chaincodeId,
		Path:       setup.Org.ChaincodePath,
		Version:    chaincodeVersion,
		Args:       [][]byte{[]byte("init")},
		Policy:     ccPolicy,
		CollConfig: cfg,
	}, resmgmt.WithRetry(retry.DefaultResMgmtOpts))
#CollConfig Create Request Method
// newCollectionConfig assembles a static CollectionConfig for a private data
// collection: its name, a signature policy parsed from the string form, the
// peer dissemination bounds, and the blockToLive value.
func newCollectionConfig(colName, policy string, reqPeerCount, maxPeerCount int32,
	blockToLive uint64) (*cb.CollectionConfig, error) {
	sigPolicy, err := cauthdsl.FromString(policy)
	if err != nil {
		return nil, err
	}
	static := &cb.StaticCollectionConfig{
		Name: colName,
		MemberOrgsPolicy: &cb.CollectionPolicyConfig{
			Payload: &cb.CollectionPolicyConfig_SignaturePolicy{
				SignaturePolicy: sigPolicy,
			},
		},
		RequiredPeerCount: reqPeerCount,
		MaximumPeerCount:  maxPeerCount,
		BlockToLive:       blockToLive,
	}
	return &cb.CollectionConfig{
		Payload: &cb.CollectionConfig_StaticCollectionConfig{
			StaticCollectionConfig: static,
		},
	}, nil
}
you should be able supply it as a parameter to the InstantiateCCRequest struct - specifically CollConfig - you can check out the structure in the Go docs -> https://github.com/hyperledger/fabric-sdk-go/blob/master/pkg/client/resmgmt/resmgmt.go#L69 and the CollectionConfig type is described here -> https://github.com/hyperledger/fabric-sdk-go/blob/master/third_party/github.com/hyperledger/fabric/protos/common/collection.pb.go#L69