Skip to content

Commit

Permalink
Upgrade Jenkins CI with latest capv (antrea-io#681)
Browse files Browse the repository at this point in the history
* new template config file
* new setup and cleanup scripts
  • Loading branch information
lzhecheng authored and antoninbas committed Jun 16, 2020
1 parent c912180 commit 6391ccc
Show file tree
Hide file tree
Showing 8 changed files with 281 additions and 326 deletions.
179 changes: 170 additions & 9 deletions ci/cluster-api/vsphere/templates/cluster.yaml
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
apiVersion: cluster.x-k8s.io/v1alpha2
apiVersion: cluster.x-k8s.io/v1alpha3
kind: Cluster
metadata:
name: CLUSTERNAME
Expand All @@ -12,13 +12,44 @@ spec:
services:
cidrBlocks:
- 100.64.0.0/13
controlPlaneRef:
apiVersion: controlplane.cluster.x-k8s.io/v1alpha3
kind: KubeadmControlPlane
name: CLUSTERNAME
infrastructureRef:
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
kind: VSphereCluster
name: CLUSTERNAME
namespace: CLUSTERNAMESPACE
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
# HAProxyLoadBalancer (CAPV v1alpha3): provisions an HAProxy VM that load-balances
# the workload cluster's API server. ALLCAPS tokens (CLUSTERNAME, VCENTERNAME, ...)
# are placeholders substituted by the CI setup scripts before `kubectl apply`.
# NOTE: indentation restored here — the extracted diff had flattened it.
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
kind: HAProxyLoadBalancer
metadata:
  labels:
    # Associates the load balancer with its owning Cluster object.
    cluster.x-k8s.io/cluster-name: CLUSTERNAME
  name: CLUSTERNAME
  namespace: CLUSTERNAMESPACE
spec:
  user:
    authorizedKeys:
      - SSHAUTHORIZEDKEYS
    name: capvs
  virtualMachineConfiguration:
    # linkedClone is faster than fullClone; requires a snapshot on the template VM.
    cloneMode: linkedClone
    datacenter: DATACENTERNAME
    datastore: WorkloadDatastore
    diskGiB: 25
    folder: antrea
    memoryMiB: 8192
    network:
      devices:
        - dhcp4: true
          networkName: NETWORKNAME
    numCPUs: 2
    resourcePool: RESOURCEPOOLPATH
    server: VCENTERNAME
    # OVA template for the HAProxy appliance; version must match the CAPV release.
    template: capv-haproxy-v0.6.3
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
kind: VSphereCluster
metadata:
name: CLUSTERNAME
Expand All @@ -33,13 +64,13 @@ spec:
name: NETWORKNAME
providerConfig:
cloud:
controllerImage: gcr.io/cloud-provider-vsphere/cpi/release/manager:v1.1.0
controllerImage: gcr.io/cloud-provider-vsphere/cpi/release/manager:v1.0.0
storage:
attacherImage: quay.io/k8scsi/csi-attacher:v1.1.1
controllerImage: gcr.io/cloud-provider-vsphere/csi/release/driver:v1.0.1
controllerImage: gcr.io/cloud-provider-vsphere/csi/release/driver:v1.0.2
livenessProbeImage: quay.io/k8scsi/livenessprobe:v1.1.0
metadataSyncerImage: gcr.io/cloud-provider-vsphere/csi/release/syncer:v1.0.1
nodeDriverImage: gcr.io/cloud-provider-vsphere/csi/release/driver:v1.0.1
metadataSyncerImage: gcr.io/cloud-provider-vsphere/csi/release/syncer:v1.0.2
nodeDriverImage: gcr.io/cloud-provider-vsphere/csi/release/driver:v1.0.2
provisionerImage: quay.io/k8scsi/csi-provisioner:v1.2.1
registrarImage: quay.io/k8scsi/csi-node-driver-registrar:v1.1.0
virtualCenter:
Expand All @@ -49,6 +80,136 @@ spec:
datacenter: DATACENTERNAME
datastore: WorkloadDatastore
folder: antrea
resourcePool: 'RESOURCEPOOLPATH'
resourcePool: RESOURCEPOOLPATH
server: VCENTERNAME
loadBalancerRef:
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
kind: HAProxyLoadBalancer
name: CLUSTERNAME
server: VCENTERNAME
---
# VSphereMachineTemplate (CAPV v1alpha3): VM shape used for both control-plane
# nodes (via KubeadmControlPlane.infrastructureTemplate) and workers (via the
# MachineDeployment's infrastructureRef). ALLCAPS tokens are CI placeholders.
# NOTE: indentation restored here — the extracted diff had flattened it.
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
kind: VSphereMachineTemplate
metadata:
  name: CLUSTERNAME
  namespace: CLUSTERNAMESPACE
spec:
  template:
    spec:
      # linkedClone is faster than fullClone; requires a snapshot on the template VM.
      cloneMode: linkedClone
      datacenter: DATACENTERNAME
      datastore: WorkloadDatastore
      diskGiB: 25
      folder: antrea
      memoryMiB: 8192
      network:
        devices:
          - dhcp4: true
            networkName: NETWORKNAME
      numCPUs: 2
      resourcePool: RESOURCEPOOLPATH
      server: VCENTERNAME
      # Node OS image; the embedded kube version must match `version: v1.17.3`
      # declared in the KubeadmControlPlane and MachineDeployment documents.
      template: ubuntu-1804-kube-v1.17.3
---
# KubeadmControlPlane (v1alpha3): manages the control-plane Machines, replacing
# the per-Machine controlplane.yaml used with v1alpha2. `{{ ds.meta_data.* }}`
# expressions are cloud-init datasource templates expanded on the node, not CI
# placeholders. NOTE: indentation restored — the extracted diff had flattened it.
apiVersion: controlplane.cluster.x-k8s.io/v1alpha3
kind: KubeadmControlPlane
metadata:
  name: CLUSTERNAME
  namespace: CLUSTERNAMESPACE
spec:
  infrastructureTemplate:
    apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
    kind: VSphereMachineTemplate
    name: CLUSTERNAME
  kubeadmConfigSpec:
    clusterConfiguration:
      apiServer:
        extraArgs:
          # External cloud provider: the vSphere CPI runs as a cluster addon.
          cloud-provider: external
      controllerManager:
        extraArgs:
          cloud-provider: external
    initConfiguration:
      nodeRegistration:
        criSocket: /var/run/containerd/containerd.sock
        kubeletExtraArgs:
          cloud-provider: external
        name: '{{ ds.meta_data.hostname }}'
    joinConfiguration:
      nodeRegistration:
        criSocket: /var/run/containerd/containerd.sock
        kubeletExtraArgs:
          cloud-provider: external
        name: '{{ ds.meta_data.hostname }}'
    # Runs before kubeadm on each node: pins hostname and /etc/hosts entries so
    # the kubelet registers under the expected node name.
    preKubeadmCommands:
      - hostname "{{ ds.meta_data.hostname }}"
      - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts
      - echo "127.0.0.1 localhost" >>/etc/hosts
      - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >>/etc/hosts
      - echo "{{ ds.meta_data.hostname }}" >/etc/hostname
    # Retries `kubeadm join` on transient failures (experimental in v1alpha3).
    useExperimentalRetryJoin: true
    users:
      - name: capv
        sshAuthorizedKeys:
          - SSHAUTHORIZEDKEYS
        sudo: ALL=(ALL) NOPASSWD:ALL
  # Single control-plane node — sufficient for ephemeral CI clusters.
  replicas: 1
  version: v1.17.3
---
# KubeadmConfigTemplate (v1alpha3): bootstrap configuration stamped onto each
# worker Machine created by the CLUSTERNAME-md-0 MachineDeployment. Mirrors the
# join/pre-kubeadm setup of the control plane above. NOTE: indentation restored
# here — the extracted diff had flattened it.
apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
kind: KubeadmConfigTemplate
metadata:
  name: CLUSTERNAME-md-0
  namespace: CLUSTERNAMESPACE
spec:
  template:
    spec:
      joinConfiguration:
        nodeRegistration:
          criSocket: /var/run/containerd/containerd.sock
          kubeletExtraArgs:
            # External cloud provider: the vSphere CPI runs as a cluster addon.
            cloud-provider: external
          name: '{{ ds.meta_data.hostname }}'
      # Runs before kubeadm on each node: pins hostname and /etc/hosts entries.
      preKubeadmCommands:
        - hostname "{{ ds.meta_data.hostname }}"
        - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts
        - echo "127.0.0.1 localhost" >>/etc/hosts
        - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >>/etc/hosts
        - echo "{{ ds.meta_data.hostname }}" >/etc/hostname
      # Retries `kubeadm join` on transient failures (experimental in v1alpha3).
      useExperimentalRetryJoin: true
      users:
        - name: capv
          sshAuthorizedKeys:
            - SSHAUTHORIZEDKEYS
          sudo: ALL=(ALL) NOPASSWD:ALL
---
# MachineDeployment (v1alpha3): declares the worker node pool, replacing the
# deleted machinedeployment.yaml. Joins the VSphereMachineTemplate (VM shape)
# with the KubeadmConfigTemplate (bootstrap) defined above. NOTE: indentation
# restored here — the extracted diff had flattened it.
apiVersion: cluster.x-k8s.io/v1alpha3
kind: MachineDeployment
metadata:
  labels:
    cluster.x-k8s.io/cluster-name: CLUSTERNAME
  name: CLUSTERNAME-md-0
  namespace: CLUSTERNAMESPACE
spec:
  clusterName: CLUSTERNAME
  # Two worker nodes per CI workload cluster.
  replicas: 2
  selector:
    matchLabels:
      cluster.x-k8s.io/cluster-name: CLUSTERNAME
  template:
    metadata:
      labels:
        cluster.x-k8s.io/cluster-name: CLUSTERNAME
    spec:
      bootstrap:
        configRef:
          apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
          kind: KubeadmConfigTemplate
          name: CLUSTERNAME-md-0
      clusterName: CLUSTERNAME
      infrastructureRef:
        apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
        kind: VSphereMachineTemplate
        name: CLUSTERNAME
      # Kube version must match the node image (ubuntu-1804-kube-v1.17.3).
      version: v1.17.3

72 changes: 0 additions & 72 deletions ci/cluster-api/vsphere/templates/controlplane.yaml

This file was deleted.

73 changes: 0 additions & 73 deletions ci/cluster-api/vsphere/templates/machinedeployment.yaml

This file was deleted.

2 changes: 1 addition & 1 deletion ci/cluster-api/vsphere/templates/namespace.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
"apiVersion": "v1",
"kind": "Namespace",
"metadata": {
"name": "NAMESPACENAME",
"name": "CLUSTERNAMESPACE",
"labels": {
"antrea-ci": "true"
}
Expand Down
5 changes: 3 additions & 2 deletions ci/jenkins/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -9,8 +9,9 @@ At the moment these Jenkins jobs are running on VMC (VMware on AWS). As a
result, all jobs' results and details are available publicly
[here](https://jenkins.antrea-ci.rocks/). We are using Cluster API for vSphere
([CAPV](https://github.com/kubernetes-sigs/cluster-api-provider-vsphere)) for
creating and managing workload clusters. For each job build, a completely new
workload cluster will be created. As soon as the build finishes, the cluster
creating and managing workload clusters. The management cluster is a kind cluster
running on the Jenkins node. For each job build, a completely new workload cluster
is created by this management cluster. As soon as the build finishes, the cluster
should be deleted. This ensures that all tests are run on a clean testbed.

### List of Jenkins jobs
Expand Down
Loading

0 comments on commit 6391ccc

Please sign in to comment.