import (
    "context"
    "fmt"

    infinimeshv1beta1 "github.com/infinimesh/operator/pkg/apis/infinimesh/v1beta1"
    v1beta1 "k8s.io/api/batch/v1beta1"
    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
    "sigs.k8s.io/controller-runtime/pkg/reconcile"
)
func (r *ReconcilePlatform) reconcileResetRootAccount(request reconcile.Request, instance *infinimeshv1beta1.Platform) error {
    log := logger.WithName("Reset Root Account Pwd")
    job := &v1beta1.CronJob{
        ObjectMeta: metav1.ObjectMeta{
            Name: "example",
        },
        Spec: v1beta1.CronJobSpec{
            Schedule:          "* * * * *",
            ConcurrencyPolicy: v1beta1.ForbidConcurrent,
            JobTemplate: v1beta1.JobTemplate{
                Spec: v1beta1.JobTemplateSpec{
                    Template: corev1.PodTemplateSpec{
                        Spec: corev1.PodSpec{
                            RestartPolicy: "Never",
                            Containers: []corev1.Container{
                                {
                                    Name:  "cli",
                                    Image: "busybox",
                                    Command: []string{
                                        "/bin/bash",
                                        "-c",
                                        "echo 1",
                                    },
                                    ImagePullPolicy: "Always",
                                },
                            },
                        },
                    },
                },
            },
        },
    }
I am getting an error here:
JobTemplate: v1beta1.JobTemplate{
    Spec: v1beta1.JobTemplateSpec{
        Template: corev1.PodTemplateSpec{
It may be that I am not defining it the right way. Please show me the correct way to create a CronJob in Go. You can also share your own way of writing CronJobs in Go, because I want the Kubernetes operator to create the CronJob automatically whenever the platform is restarted.
A CronJob wraps a Job, which wraps a Pod. So you need to put the PodTemplateSpec inside a JobSpec inside the CronJobSpec.
cronjob := &v1beta1.CronJob{
    ObjectMeta: metav1.ObjectMeta{
        Name:      "demo-cronjob",
        Namespace: "gitlab",
    },
    Spec: v1beta1.CronJobSpec{
        Schedule:          "* * * * *",
        ConcurrencyPolicy: v1beta1.ForbidConcurrent,
        JobTemplate: v1beta1.JobTemplateSpec{
            Spec: batchv1.JobSpec{
                Template: v1.PodTemplateSpec{
                    Spec: v1.PodSpec{
                        ...
                    },
                },
            },
        },
    },
}
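For completeness, here is a rough, self-contained sketch of the full nesting with the imports it assumes (batchv1 for batch/v1, batchv1beta1 for batch/v1beta1, corev1 for core/v1); the names and the container command are placeholders, so adapt them to your operator:

import (
    batchv1 "k8s.io/api/batch/v1"
    batchv1beta1 "k8s.io/api/batch/v1beta1"
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// buildCronJob shows the nesting: CronJobSpec -> JobTemplateSpec -> JobSpec -> PodTemplateSpec -> PodSpec.
func buildCronJob(namespace string) *batchv1beta1.CronJob {
    return &batchv1beta1.CronJob{
        ObjectMeta: metav1.ObjectMeta{
            Name:      "demo-cronjob",
            Namespace: namespace,
        },
        Spec: batchv1beta1.CronJobSpec{
            Schedule:          "* * * * *",
            ConcurrencyPolicy: batchv1beta1.ForbidConcurrent,
            JobTemplate: batchv1beta1.JobTemplateSpec{
                Spec: batchv1.JobSpec{
                    Template: corev1.PodTemplateSpec{
                        Spec: corev1.PodSpec{
                            RestartPolicy: corev1.RestartPolicyNever,
                            Containers: []corev1.Container{
                                {
                                    Name:  "cli",
                                    Image: "busybox",
                                    // busybox ships /bin/sh, not /bin/bash.
                                    Command: []string{"/bin/sh", "-c", "echo 1"},
                                },
                            },
                        },
                    },
                },
            },
        },
    }
}

To have the operator re-create it automatically, you would then set the owner reference (for example with controllerutil.SetControllerReference(instance, cronjob, r.scheme), assuming your reconciler keeps the scheme in a field like r.scheme) and create the object with your controller-runtime client during reconciliation.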
I'm trying to parse a JSON output (added below) and save the values I need into a new JSON file.
The values I need are metadata.name and metadata.namespace.
The following JSON is the file I need to parse and extract the values from. I get this output from the command kubectl get pod -o json.
{
"apiVersion": "v1",
"items": [
{
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"creationTimestamp": "2022-12-21T12:18:49Z",
"generateName": "backend-99fb66465-",
"labels": {
"app": "backend",
"pod-template-hash": "99fb66465"
},
"name": "backend-99fb66465-2lxwp",
"namespace": "testingspace",
},
{
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"creationTimestamp": "2022-12-21T12:18:49Z",
"generateName": "backend-99fb66465-",
"labels": {
"app": "backend",
"pod-template-hash": "99fb66465"
},
"name": "backend-99fb66465-2lxwp",
"namespace": "testingspace",
}
]
[...]
}
My Ansible code is this:
- name: Search for all running pods from file ./data/kubernetes/pods-status
  shell: |
    cat ./data/kubernetes/pods-status
  register: pods

- name: Pods name
  set_fact:
    podnames: "{{ pods.stdout|from_json|json_query(names) }}"
    podkind: "{{ pods.stdout|from_json|json_query(kind) }}"
  vars:
    names: 'items[*].metadata.name'
    kind: 'items[*].kind'

- name: Copy pods information to local file
  local_action:
    module: copy
    dest: "./data/kubernetes/mainpod.json"
    #content: "{{ podsjson | to_json }} "
    content: "{{ [{'val': item }] }}"
  loop: "{{ podnames }}"
I'm expecting to have the following file:
{
"items": {
"name": "backend-99fb66465-2lxwp",
"namespace":"testingspace"
},
{
"name": "backend-99fb66465-2lxwp",
"namespace": "testingspace"
}
}
}
But so far I only have this one:
[{"val": "backup-mysqldump-27875520-d26j4"}]
Given the data in the dictionary for testing
pods:
  apiVersion: v1
  items:
  - apiVersion: v1
    kind: Pod
    metadata:
      creationTimestamp: '2022-12-21T12:18:49Z'
      generateName: backend-99fb66465-
      labels:
        app: backend
        pod-template-hash: 99fb66465
      name: backend-99fb66465-2lxwp
      namespace: testingspace
  - apiVersion: v1
    kind: Pod
    metadata:
      creationTimestamp: '2022-12-21T12:18:49Z'
      generateName: backend-99fb66465-
      labels:
        app: backend
        pod-template-hash: 99fb66465
      name: backend-99fb66465-2lxwp
      namespace: testingspace
Declare the query
name_space: "{{ pods|json_query(name_space_query) }}"
name_space_query: 'items[].{name: metadata.name,
namespace: metadata.namespace}'
gives
name_space:
- name: backend-99fb66465-2lxwp
  namespace: testingspace
- name: backend-99fb66465-2lxwp
  namespace: testingspace
Example of a complete playbook for testing
- hosts: localhost
  vars:
    pods:
      apiVersion: v1
      items:
      - apiVersion: v1
        kind: Pod
        metadata:
          creationTimestamp: '2022-12-21T12:18:49Z'
          generateName: backend-99fb66465-
          labels:
            app: backend
            pod-template-hash: 99fb66465
          name: backend-99fb66465-2lxwp
          namespace: testingspace
      - apiVersion: v1
        kind: Pod
        metadata:
          creationTimestamp: '2022-12-21T12:18:49Z'
          generateName: backend-99fb66465-
          labels:
            app: backend
            pod-template-hash: 99fb66465
          name: backend-99fb66465-2lxwp
          namespace: testingspace
    name_space: "{{ pods|json_query(name_space_query) }}"
    name_space_query: 'items[].{name: metadata.name,
                                namespace: metadata.namespace}'
  tasks:
    - debug:
        var: name_space
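To also write the result to a file, as the last task in the question attempted, one option is to serialize the whole list in a single task instead of looping item by item. A minimal sketch, assuming the name_space variable declared above and the destination path from the question (the expected file shown in the question is not valid JSON, so this writes the closest well-formed equivalent, a top-level items list):

- name: Copy pods information to local file
  local_action:
    module: copy
    dest: "./data/kubernetes/mainpod.json"
    content: "{{ {'items': name_space} | to_nice_json }}"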
I am having trouble adding the Ports field in ServiceSpec. What am I doing wrong?
import (
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

port := corev1.ServicePort{}
port.Port = 8443

ports := make(corev1.ServicePort, 1)

service := &corev1.Service{
    ObjectMeta: metav1.ObjectMeta{
        Name:      "test-webhook-admissions",
        Namespace: "test",
        Labels: map[string]string{
            "app.kubernetes.io/instance": "test",
            "app.kubernetes.io/name":     "test",
            "control-plane":              "controller-manager",
        },
    },
    Spec: corev1.ServiceSpec{
        Ports:    ports, // Not working
        Selector: nil,
        //ClusterIP: "",
    },
}
I think you have to append the port object to your ports slice. Note that ports also has to be declared as a slice, []corev1.ServicePort; make(corev1.ServicePort, 1) will not compile because ServicePort is a struct type.
This worked for me:
func GetLabels() map[string]string {
    return map[string]string{
        "app.kubernetes.io/instance": "test",
        "app.kubernetes.io/name":     "test",
        "control-plane":              "controller-manager",
    }
}

service := &corev1.Service{
    ObjectMeta: metav1.ObjectMeta{
        Name:      "test-webhook-admissions",
        Namespace: namespace,
        Labels:    GetLabels(),
    },
    Spec: corev1.ServiceSpec{
        Ports: []corev1.ServicePort{
            {
                Name:       "webhook",
                Port:       8443,
                TargetPort: intstr.FromInt(8443),
                Protocol:   "TCP",
            },
        },
        Selector: GetLabels(),
    },
}
err := w.Client.Create(context.Background(), service)
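If you would rather keep the shape of the original snippet, the minimal change is to declare ports as a slice and append the port to it, roughly like this (the working example above also needs "k8s.io/apimachinery/pkg/util/intstr" imported for intstr.FromInt):

port := corev1.ServicePort{}
port.Port = 8443

// ports must be a slice of ServicePort, not a single struct value.
ports := make([]corev1.ServicePort, 0, 1)
ports = append(ports, port)

// then, as before:
// Spec: corev1.ServiceSpec{Ports: ports, ...}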
I want to deploy my microservice infrastructure as AKS on Azure. I created a node on which 3 microservices run. My API gateway should be reachable via a public IP and forward data to my other two microservices.
PS /home/jan-marius> kubectl get pods
NAME READY STATUS RESTARTS AGE
apigateway-77875f89cb-qcmnf 1/1 Running 0 18h
contacts-5ccc69f74-x287p 1/1 Running 0 18h
templates-579fc4984b-srv7h 1/1 Running 0 18h
So far so good. After that I created a public IP as described in the Microsoft docs and changed my YAML file as follows.
az network public-ip create \
--resource-group myResourceGroup \
--name myAKSPublicIP \
--sku Standard \
--allocation-method static
apiVersion: apps/v1
kind: Deployment
metadata:
  name: apigateway
spec:
  replicas: 1
  selector:
    matchLabels:
      app: apigateway
  template:
    metadata:
      labels:
        app: apigateway
    spec:
      nodeSelector:
        "beta.kubernetes.io/os": linux
      containers:
      - name: apigateway
        image: xxx.azurecr.io/apigateway:11
        resources:
          requests:
            cpu: 100m
            memory: 128Mi
          limits:
            cpu: 250m
            memory: 512Mi
        ports:
        - containerPort: 8800
          name: apigateway
---
apiVersion: v1
kind: Service
metadata:
  annotations:
    service.beta.kubernetes.io/azure-dns-label-name: tegos-sendmessage
  name: apigateway
spec:
  loadBalancerIP: 20.50.10.36
  type: LoadBalancer
  ports:
  - port: 8800
  selector:
    app: apigateway
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: contacts
spec:
  replicas: 1
  selector:
    matchLabels:
      app: contacts
  template:
    metadata:
      labels:
        app: contacts
    spec:
      nodeSelector:
        "beta.kubernetes.io/os": linux
      containers:
      - name: contacts
        image: xxx.azurecr.io/contacts:12
        resources:
          requests:
            cpu: 100m
            memory: 128Mi
          limits:
            cpu: 250m
            memory: 512Mi
        ports:
        - containerPort: 8100
          name: contacts
---
apiVersion: v1
kind: Service
metadata:
  name: contacts
spec:
  ports:
  - port: 8100
  selector:
    app: contacts
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: templates
spec:
  replicas: 1
  selector:
    matchLabels:
      app: templates
  template:
    metadata:
      labels:
        app: templates
    spec:
      nodeSelector:
        "beta.kubernetes.io/os": linux
      containers:
      - name: templates
        image: xxx.azurecr.io/templates:13
        resources:
          requests:
            cpu: 100m
            memory: 128Mi
          limits:
            cpu: 250m
            memory: 512Mi
        ports:
        - containerPort: 8200
          name: templates
---
apiVersion: v1
kind: Service
metadata:
  name: templates
spec:
  ports:
  - port: 8200
  selector:
    app: templates
However, when I query the service, the external IP remains pending:
PS /home/jan-marius> kubectl get service apigateway
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
apigateway LoadBalancer 10.0.181.113 <pending> 8800:30817/TCP 19h
PS /home/jan-marius> kubectl describe service apigateway
Name: apigateway
Namespace: default
Labels: <none>
Annotations: kubectl.kubernetes.io/last-applied-configuration:
{"apiVersion":"v1","kind":"Service","metadata":{"annotations":{"service.beta.kubernetes.io/azure-dns-label-name":"tegos-sendmessage"},"nam...
service.beta.kubernetes.io/azure-dns-label-name: tegos-sendmessage
Selector: app=apigateway
Type: LoadBalancer
IP: 10.0.181.113
IP: 20.50.10.36
Port: <unset> 8800/TCP
TargetPort: 8800/TCP
NodePort: <unset> 30817/TCP
Endpoints: 10.244.0.14:8800
Session Affinity: None
External Traffic Policy: Cluster
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal EnsuringLoadBalancer 5m (x216 over 17h) service-controller Ensuring load balancer
I read that this error can occur if the locations of the cluster and the external IP do not match, or if the load balancer SKUs do not match. I am sure the locations match, but I can't be sure about the load balancer SKUs. The public IP SKU is set to Standard; however, I have never defined the load balancer SKU myself and I don't know where to find it. Can someone tell me what I'm doing wrong and how I can expose my web service?
PS /home/jan-marius> az aks show -g SendMessageResource -n SendMessageCluster
{
"aadProfile": null,
"addonProfiles": {
"httpapplicationrouting": {
"config": {
"HTTPApplicationRoutingZoneName": "e6e284534ad74c0d9c01.westeurope.aksapp.io"
},
"enabled": true,
"identity": null
},
"omsagent": {
"config": {
"loganalyticsworkspaceresourceid": "/subscriptions/a553134ba7eb-cb83-484d-a05d-44bb70125b8a/resourcegroups/defaultresourcegroup-weu/providers/microsoft.operationalinsights/workspaces/defaultworkspace-a55ba7eb-cb83-484d-a05d-44bb334170125b8a-weu"
},
"enabled": true,
"identity": null
}
},
"agentPoolProfiles": [
{
"availabilityZones": null,
"count": 1,
"enableAutoScaling": null,
"enableNodePublicIp": false,
"maxCount": null,
"maxPods": 110,
"minCount": null,
"mode": "System",
"name": "nodepool1",
"nodeLabels": {},
"nodeTaints": null,
"orchestratorVersion": "1.15.11",
"osDiskSizeGb": 100,
"osType": "Linux",
"provisioningState": "Succeeded",
"scaleSetEvictionPolicy": null,
"scaleSetPriority": null,
"spotMaxPrice": null,
"tags": null,
"type": "VirtualMachineScaleSets",
"vmSize": "Standard_DS2_v2"
}
],
"apiServerAccessProfile": null,
"autoScalerProfile": null,
"diskEncryptionSetId": null,
"dnsPrefix": "SendMessag-SendMessageResou-a55ba7",
"enablePodSecurityPolicy": null,
"enableRbac": true,
"fqdn": "sendmessag-sendmessageresou-a55ba7-14596671.hcp.westeurope.azmk8s.io",
"id": "/subscriptions/a55b3141a7eb-cb83-484d-a05d-44bb70125b8a/resourcegroups/SendMessageResource/providers/Microsoft.ContainerService/managedClusters/SendMessageCluster",
"identity": null,
"identityProfile": null,
"kubernetesVersion": "1.15.11",
"linuxProfile": {
"adminUsername": "azureuser",
"ssh": {
"publicKeys": [
{
"keyData": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7bzXktZht3zLbHrz3Xpv3VNhtrj/XmBKOIHB0D0ZpBIrsfXcg9veBov8n3cU/F/oKIfqcL2xaoktVwZFz9AjEi7qPXdxrsVLjV2+w0kPyC3ZC5JbtLSO4CFgn0MtclC6mE3OPYczYPoFdZI3/w/AmoZ6TsT7MupkCjKtrYIIaDZ/22zuTMYMvJro7cfjKI5OSR7soybXcoFKw+3tzwO9Mv9lUQr7x0eRCUAUJN6OziEI9p36fLEnNgRG4GiJJZP5aqqsVRUDuu8PF9pO0YLMBr3b2HHgzpDwSebZ6TU//okuc30cqG/2v2LkjBDRGrs5YxiSv3+ejr/9A4XGWup4Z"
}
]
}
},
"location": "westeurope",
"maxAgentPools": 10,
"name": "SendMessageCluster",
"networkProfile": {
"dnsServiceIp": "10.0.0.10",
"dockerBridgeCidr": "172.17.0.1/16",
"loadBalancerProfile": {
"allocatedOutboundPorts": null,
"effectiveOutboundIps": [
{
"id": "/subscriptions/a55b3142a7eb-cb83-484d-a05d-44bb70125b8a/resourceGroups/MC_SendMessageResource_SendMessageCluster_westeurope/providers/Microsoft.Network/publicIPAddresses/988314172c28-d4da-431e-b7f8-5acb08e468b4",
"resourceGroup": "MC_SendMessageResource_SendMessageCluster_westeurope"
}
],
"idleTimeoutInMinutes": null,
"managedOutboundIps": {
"count": 1
},
"outboundIpPrefixes": null,
"outboundIps": null
},
"loadBalancerSku": "Standard",
"networkMode": null,
"networkPlugin": "kubenet",
"networkPolicy": null,
"outboundType": "loadBalancer",
"podCidr": "10.244.0.0/16",
"serviceCidr": "10.0.0.0/16"
},
"nodeResourceGroup": "MC_SendMessageResource_SendMessageCluster_westeurope",
"privateFqdn": null,
"provisioningState": "Succeeded",
"resourceGroup": "SendMessageResource",
"servicePrincipalProfile": {
"clientId": "9009bcd8-4933-4641-b00b-237e157d86589b"
},
"sku": {
"name": "Basic",
"tier": "Free"
},
"type": "Microsoft.ContainerService/ManagedClusters",
"windowsProfile": null
}
If your public IP is in another resource group, you need to tell the service which resource group the IP lives in via an annotation:
apiVersion: v1
kind: Service
metadata:
  annotations:
    service.beta.kubernetes.io/azure-dns-label-name: tegos-sendmessage
    service.beta.kubernetes.io/azure-load-balancer-resource-group: myResourceGroup
  name: apigateway
spec:
  loadBalancerIP: 20.50.10.36
  type: LoadBalancer
  ports:
  - port: 8800
  selector:
    app: apigateway
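In addition, the cluster identity (the service principal shown under servicePrincipalProfile in the az aks show output above) usually needs network permissions on the resource group that holds the static IP, otherwise the load balancer still cannot attach it. A hedged sketch, with a placeholder subscription ID and the client ID taken from your own output:

az role assignment create \
  --assignee <clientId from servicePrincipalProfile> \
  --role "Network Contributor" \
  --scope "/subscriptions/<subscription-id>/resourceGroups/myResourceGroup"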
I am new to Kubernetes and not able to troubleshoot this issue.
The service and pod are running, but I am not able to get a response from Postman.
kubectl get service
kubectl get service personservice -o json
{
"apiVersion": "v1",
"kind": "Service",
"metadata": {
"annotations": {
"kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Service\",\"metadata\":{\"annotations\":{},\"labels\":{\"app\":\"personservice\"},\"name\":\"personservice\",\"namespace\":\"default\"},\"spec\":{\"ports\":[{\"nodePort\":30001,\"port\":8080,\"protocol\":\"TCP\"}],\"selector\":{\"app\":\"personservice\"},\"type\":\"NodePort\"}}\n"
},
"creationTimestamp": "2019-11-07T11:22:04Z",
"labels": {
"app": "personservice"
},
"name": "personservice",
"namespace": "default",
"resourceVersion": "84920",
"selfLink": "/api/v1/namespaces/default/services/personservice",
"uid": "d32d4dd8-0150-11ea-80e6-00155d010311"
},
"spec": {
"clusterIP": "10.98.168.74",
"externalTrafficPolicy": "Cluster",
"ports": [
{
"nodePort": 30001,
"port": 8080,
"protocol": "TCP",
"targetPort": 8080
}
],
"selector": {
"app": "personservice"
},
"sessionAffinity": "None",
"type": "NodePort"
},
"status": {
"loadBalancer": {}
}
}
kubectl get pods
When I try to hit it using Postman at
http://100.120.60.210:30001/getPerson?nino=12345Ac&id=1
I get "There was an error connecting to http://100.120.60.210:30001/getPerson?nino=12345Ac&id=1."
IPv4 Address. . . . . . . . . . . : 100.120.60.210 (the IP address of my laptop)
Edit 1:
pod deploy config:
apiVersion: apps/v1
kind: Deployment
metadata:
  name: personservice
  labels:
    app: personservice
spec:
  replicas: 1
  selector:
    matchLabels:
      app: personservice
  template:
    metadata:
      labels:
        app: personservice
    spec:
      containers:
      - name: personservice
        image: microservice-k8s/personmicroservice-k8s:1.0
        ports:
        - containerPort: 8080
        env:
        - name: PROFILE
          value: "dev"
        - name: SERVER_PORT
          value: "8080"
        - name: ZIPKIN_URI
          value: "http://100.120.60.210:9411"
Edit 2:
I have deployed another pod and service to test, and that one is working.
I ran this command for both services:
kubectl get service personservice -o json
The only difference I found is the following:
Non-working service
"status": {
"loadBalancer": {}
}
working service:
"status": {
"loadBalancer": {
"ingress": [
{
"hostname": "localhost"
}
]
}
}
What could be the reason behind the empty status?
Edit 3:
Port forwarding to the pod and accessing it through http://localhost:7000 is working.
kubectl port-forward personservice-5c66cfcb89-dd6l7 7000:8080
This means something is wrong with the service only.
Run minikube tunnel to be able to connect to the IP directly.
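minikube tunnel runs in the foreground and must stay open while you test. A rough usage sketch (service name taken from the question):

# terminal 1
minikube tunnel

# terminal 2: check the service, then call it
kubectl get service personservice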
I've reproduced your case in a slightly different scenario: you are using Docker Desktop with Kubernetes, and I decided to use Minikube to reproduce it.
I used your YAML files as a base to deploy an nginx service, and I didn't face the problem you are describing.
This is my deployment:
apiVersion: apps/v1
kind: Deployment
metadata:
  name: personservice
  labels:
    app: personservice
spec:
  replicas: 1
  selector:
    matchLabels:
      app: personservice
  template:
    metadata:
      labels:
        app: personservice
    spec:
      containers:
      - name: personservice
        image: nginx
        ports:
        - containerPort: 80
This is my Service:
apiVersion: v1
kind: Service
metadata:
  labels:
    app: personservice
  name: personservice
  namespace: default
spec:
  externalTrafficPolicy: Cluster
  ports:
  - nodePort: 30001
    port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: personservice
  sessionAffinity: None
  type: NodePort
Note: I'm using port 80 for my convenience only.
As you can see, these YAML files have the same components as yours, and I can successfully curl my nginx application:
user@bf:~$ minikube ip
192.168.39.153
user@bf:~$ curl 192.168.39.153:30001
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
body {
width: 35em;
margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif;
}
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>
<p>For online documentation and support please refer to
nginx.org.<br/>
Commercial support is available at
nginx.com.</p>
<p><em>Thank you for using nginx.</em></p>
</body>
</html>
I highly suggest you reproduce this in a different environment, using Minikube for example. There are many advantages to using Minikube instead of Docker Desktop.
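If you want to try the same reproduction yourself, a minimal sketch (the manifest file names are placeholders for the Deployment and Service shown above):

minikube start
kubectl apply -f personservice-deployment.yaml -f personservice-service.yaml
curl "$(minikube ip):30001"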
The issue is that I cannot create a deployment spec without a replication controller being created along with it. I would not like to use a replication controller, because my app always uses only one pod, and I would like to set the restart policy to Never so that a terminated container is not restarted.
apiVersion: v1
kind: Pod
metadata:
  name: two-containers
spec:
  restartPolicy: Never
  volumes:
  - name: shared-data
    emptyDir: {}
  containers:
  - name: nginx-container
    image: nginx
    volumeMounts:
    - name: shared-data
      mountPath: /usr/share/nginx/html
  - name: debian-container
    image: debian
    volumeMounts:
    - name: shared-data
      mountPath: /pod-data
    command: ["/bin/sh"]
    args: ["-c", "echo Hello from the debian container > /pod-data/index.html"]
Above is the target YAML file, which I would like to implement and deploy with the Kubernetes client-go; however, client-go currently only provides deployment with a replication controller.
// Define Deployments spec.
deploySpec := &v1beta1.Deployment{
    TypeMeta: unversioned.TypeMeta{
        Kind:       "Deployment",
        APIVersion: "extensions/v1beta1",
    },
    ObjectMeta: v1.ObjectMeta{
        Name: "binary-search",
    },
    Spec: v1beta1.DeploymentSpec{
        Replicas: int32p(1),
        Template: v1.PodTemplateSpec{
            ObjectMeta: v1.ObjectMeta{
                Name:   appName,
                Labels: map[string]string{"app": appName},
            },
            Spec: v1.PodSpec{
                Containers: []v1.Container{
                    v1.Container{
                        Name:  "nginx-container",
                        Image: "nginx",
                        VolumeMounts: []v1.VolumeMount{
                            v1.VolumeMount{
                                MountPath: "/usr/share/nginx/html",
                                Name:      "shared-data",
                            },
                        },
                    },
                    v1.Container{
                        Name:  "debian-container",
                        Image: "debian",
                        VolumeMounts: []v1.VolumeMount{
                            v1.VolumeMount{
                                MountPath: "/pod-data",
                                Name:      "shared-data",
                            },
                        },
                        Command: []string{
                            "/bin/sh",
                        },
                        Args: []string{
                            "-c",
                            "echo Hello from the debian container > /pod-data/index1.html",
                        },
                    },
                },
                RestartPolicy: v1.RestartPolicyAlways,
                DNSPolicy:     v1.DNSClusterFirst,
                Volumes: []v1.Volume{
                    v1.Volume{
                        Name: "shared-data",
                        VolumeSource: v1.VolumeSource{
                            EmptyDir: &v1.EmptyDirVolumeSource{},
                        },
                    },
                },
            },
        },
    },
}
// Implement deployment update-or-create semantics.
deploy := c.Extensions().Deployments(namespace)
_, err := deploy.Update(deploySpec)
Any suggestions? Many thanks in advance!
If you don't want the Pod to be restarted, then you can just use the Pod directly. There is no need for a Deployment, since Deployments only make sense if you want automatic Pod restarts and roll-outs of updates.
The code would look something like this (not tested):
podSpec := v1.PodSpec{
    Containers: []v1.Container{
        v1.Container{
            Name:  "nginx-container",
            Image: "nginx",
            VolumeMounts: []v1.VolumeMount{
                v1.VolumeMount{
                    MountPath: "/usr/share/nginx/html",
                    Name:      "shared-data",
                },
            },
        },
        v1.Container{
            Name:  "debian-container",
            Image: "debian",
            VolumeMounts: []v1.VolumeMount{
                v1.VolumeMount{
                    MountPath: "/pod-data",
                    Name:      "shared-data",
                },
            },
            Command: []string{
                "/bin/sh",
            },
            Args: []string{
                "-c",
                "echo Hello from the debian container > /pod-data/index1.html",
            },
        },
    },
    // Never restart terminated containers, matching the target YAML above.
    RestartPolicy: v1.RestartPolicyNever,
    DNSPolicy:     v1.DNSClusterFirst,
    Volumes: []v1.Volume{
        v1.Volume{
            Name: "shared-data",
            VolumeSource: v1.VolumeSource{
                EmptyDir: &v1.EmptyDirVolumeSource{},
            },
        },
    },
}
// Wrap the PodSpec in a Pod object; bare Pods are created rather than updated,
// since most PodSpec fields are immutable.
pod := &v1.Pod{
    ObjectMeta: v1.ObjectMeta{
        Name:   appName,
        Labels: map[string]string{"app": appName},
    },
    Spec: podSpec,
}
_, err := c.Core().Pods(namespace).Create(pod)
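If you need the update-or-create semantics hinted at in the original comment, keep in mind that most fields of a running Pod cannot be changed, so "update" effectively means delete and re-create. A rough sketch, replacing the single Create call above and assuming the IsAlreadyExists helper from the API errors package of whichever client-go version you use (the exact Delete signature also varies between versions):

pods := c.Core().Pods(namespace)
_, err := pods.Create(pod)
if errors.IsAlreadyExists(err) {
    // Deletion is asynchronous; a real implementation should wait for the old
    // Pod to terminate before re-creating it.
    _ = pods.Delete(pod.Name, &v1.DeleteOptions{})
    _, err = pods.Create(pod)
}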