E2E: Dualstack test (k3s-io#5617)
* E2E dualstack test
* Improve testing documentation

Signed-off-by: Derek Nola <derek.nola@suse.com>
dereknola committed Jun 14, 2022
1 parent d4522de commit 12695ce
Showing 15 changed files with 467 additions and 38 deletions.
8 changes: 7 additions & 1 deletion tests/e2e/README.md
@@ -42,4 +42,10 @@ To run all the E2E tests and generate JUnit testing reports:
ginkgo --junit-report=result.xml ./tests/e2e/...
```

Note: The `go test` default timeout is 10 minutes, so the `-timeout` flag should be used. The `ginkgo` default timeout is 1 hour, so no timeout flag is needed.
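
For example, invoking the dualstack suite added in this commit might look like the sketch below (the `30m` value is only an illustration; pick whatever fits the suite being run):

```bash
# go test needs the default 10 minute timeout raised explicitly
go test -timeout=30m ./tests/e2e/dualstack/...

# ginkgo already defaults to a 1 hour timeout, so no flag is required
ginkgo --junit-report=result.xml ./tests/e2e/dualstack/...
```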

# Debugging
In the event of a test failure, the cluster and VMs are retained in their broken state. Startup logs are retained in `vagrant.log`.
To see a list of nodes: `vagrant status`
To ssh into a node: `vagrant ssh <NODE>`
Once you are done debugging, or are ready to rerun the test, use `vagrant destroy -f` to remove the broken cluster.
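
A typical debugging pass might look like the following sketch (the node name `server-0` and the commands run inside the node are examples, not part of the test harness):

```bash
vagrant status                  # list the retained VMs and their states
vagrant ssh server-0            # inspect a node, e.g. journalctl -u k3s, kubectl get nodes -o wide
less vagrant.log                # review the startup logs from the failed run
vagrant destroy -f              # remove the broken cluster when finished
```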
36 changes: 36 additions & 0 deletions tests/e2e/amd64_resource_files/dualstack_clusterip.yaml
@@ -0,0 +1,36 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ds-clusterip-pod
spec:
  selector:
    matchLabels:
      k8s-app: nginx-app-clusterip
  replicas: 2
  template:
    metadata:
      labels:
        k8s-app: nginx-app-clusterip
    spec:
      containers:
      - name: nginx
        image: ranchertest/mytestcontainer
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  labels:
    k8s-app: nginx-app-clusterip
  name: ds-clusterip-svc
  namespace: default
spec:
  type: ClusterIP
  ipFamilyPolicy: PreferDualStack
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80
  selector:
    k8s-app: nginx-app-clusterip
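
Because the Service requests `ipFamilyPolicy: PreferDualStack`, it should receive both an IPv4 and an IPv6 ClusterIP once the cluster is configured with dual-stack `service-cidr` values (see the Vagrantfile below). A quick manual spot check, assuming a working kubeconfig, might be:

```bash
# Expect two entries, one from each service CIDR, e.g. ["10.43.x.x","2001:cafe:42:1::x"]
kubectl get svc ds-clusterip-svc -o jsonpath='{.spec.clusterIPs}'
```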
16 changes: 16 additions & 0 deletions tests/e2e/amd64_resource_files/dualstack_ingress.yaml
@@ -0,0 +1,16 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ds-ingress
spec:
  rules:
  - host: testds.com
    http:
      paths:
      - backend:
          service:
            # Reliant on dualstack_clusterip.yaml
            name: ds-clusterip-svc
            port:
              number: 80
        pathType: ImplementationSpecific
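
The Ingress routes the host `testds.com` to the ClusterIP Service above. As a manual sketch only (it assumes the default ingress controller is listening on port 80 on the node addresses defined in the Vagrantfile below):

```bash
# IPv4 and IPv6 addresses of server-0 from the dualstack Vagrantfile
curl -H "Host: testds.com" http://10.10.10.100/
curl -g -H "Host: testds.com" "http://[a11:decf:c0ff:ee::10]/"
```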
36 changes: 36 additions & 0 deletions tests/e2e/amd64_resource_files/dualstack_nodeport.yaml
@@ -0,0 +1,36 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ds-nodeport-pod
spec:
  selector:
    matchLabels:
      k8s-app: nginx-app-nodeport
  replicas: 2
  template:
    metadata:
      labels:
        k8s-app: nginx-app-nodeport
    spec:
      containers:
      - name: nginx
        image: ranchertest/mytestcontainer
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  labels:
    k8s-app: nginx-app-nodeport
  name: ds-nodeport-svc
  namespace: default
spec:
  type: NodePort
  ipFamilyPolicy: PreferDualStack
  ports:
  - port: 80
    nodePort: 30096
    name: http
  selector:
    k8s-app: nginx-app-nodeport
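
The NodePort Service pins port `30096`, so it should be reachable on either address family of any node. A manual spot check might look like this (node addresses taken from the Vagrantfile below):

```bash
curl http://10.10.10.100:30096/
curl -g "http://[a11:decf:c0ff:ee::10]:30096/"
```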
111 changes: 111 additions & 0 deletions tests/e2e/dualstack/Vagrantfile
@@ -0,0 +1,111 @@
ENV['VAGRANT_NO_PARALLEL'] = 'no'
NODE_ROLES = (ENV['E2E_NODE_ROLES'] ||
["server-0", "server-1", "server-2", "agent-0" ])
NODE_BOXES = (ENV['E2E_NODE_BOXES'] ||
['generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004'])
GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "master")
RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "")
NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i
NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 2048).to_i
NETWORK4_PREFIX = "10.10.10"
NETWORK6_PREFIX = "a11:decf:c0ff:ee"
install_type = ""

def provision(vm, roles, role_num, node_num)
  vm.box = NODE_BOXES[node_num]
  vm.hostname = "#{roles[0]}-#{role_num}"
  node_ip4 = "#{NETWORK4_PREFIX}.#{100+node_num}"
  node_ip6 = "#{NETWORK6_PREFIX}::#{10+node_num}"
  # Only works with libvirt, which allows IPv4 + IPv6 on a single network/interface
  vm.network "private_network",
    :ip => node_ip4,
    :netmask => "255.255.255.0",
    :libvirt__dhcp_enabled => false,
    :libvirt__forward_mode => "none",
    :libvirt__guest_ipv6 => "yes",
    :libvirt__ipv6_address => "#{NETWORK6_PREFIX}::1",
    :libvirt__ipv6_prefix => "64"

  vagrant_defaults = '../vagrantdefaults.rb'
  load vagrant_defaults if File.exists?(vagrant_defaults)

  defaultOSConfigure(vm)

  vm.provision "IPv6 Setup", type: "shell", path: "../scripts/ipv6.sh", args: [node_ip4, node_ip6, vm.box]
  install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)

  vm.provision "Ping Check", type: "shell", inline: "ping -c 2 k3s.io"

  if roles.include?("server") && role_num == 0
    vm.provision :k3s, run: 'once' do |k3s|
      k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
      k3s.args = "server "
      k3s.config = <<~YAML
        node-external-ip: #{node_ip4},#{node_ip6}
        node-ip: #{node_ip4},#{node_ip6}
        cluster-init: true
        token: vagrant
        cluster-cidr: 10.42.0.0/16,2001:cafe:42:0::/56
        service-cidr: 10.43.0.0/16,2001:cafe:42:1::/112
        bind-address: #{NETWORK4_PREFIX}.100
        flannel-iface: eth1
      YAML
      k3s.env = ["K3S_KUBECONFIG_MODE=0644", install_type]
    end
  elsif roles.include?("server") && role_num != 0
    vm.provision :k3s, run: 'once' do |k3s|
      k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
      k3s.args = "server "
      k3s.config = <<~YAML
        node-external-ip: #{node_ip4},#{node_ip6}
        node-ip: #{node_ip4},#{node_ip6}
        server: https://#{NETWORK4_PREFIX}.100:6443
        token: vagrant
        cluster-cidr: 10.42.0.0/16,2001:cafe:42:0::/56
        service-cidr: 10.43.0.0/16,2001:cafe:42:1::/112
        flannel-iface: eth1
      YAML
      k3s.env = ["K3S_KUBECONFIG_MODE=0644", install_type]
    end
  end
  if roles.include?("agent")
    vm.provision :k3s, run: 'once' do |k3s|
      k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
      k3s.args = "agent "
      k3s.config = <<~YAML
        node-external-ip: #{node_ip4},#{node_ip6}
        node-ip: #{node_ip4},#{node_ip6}
        server: https://#{NETWORK4_PREFIX}.100:6443
        token: vagrant
        flannel-iface: eth1
      YAML
      k3s.env = ["K3S_KUBECONFIG_MODE=0644", install_type]
    end
  end
end

Vagrant.configure("2") do |config|
config.vagrant.plugins = ["vagrant-k3s", "vagrant-reload", "vagrant-libvirt"]
config.vm.provider "libvirt" do |v|
v.cpus = NODE_CPUS
v.memory = NODE_MEMORY
end

if NODE_ROLES.kind_of?(String)
NODE_ROLES = NODE_ROLES.split(" ", -1)
end
if NODE_BOXES.kind_of?(String)
NODE_BOXES = NODE_BOXES.split(" ", -1)
end

# Must iterate on the index, vagrant does not understand iterating
# over the node roles themselves
NODE_ROLES.length.times do |i|
name = NODE_ROLES[i]
config.vm.define name do |node|
roles = name.split("-", -1)
role_num = roles.pop.to_i
provision(node.vm, roles, role_num, i)
end
end
end
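
For reference, a hedged sketch of bringing the cluster up by hand with the environment overrides this Vagrantfile reads (the values shown are simply its defaults; libvirt is required for the combined IPv4 + IPv6 private network):

```bash
cd tests/e2e/dualstack
E2E_NODE_ROLES="server-0 server-1 server-2 agent-0" \
E2E_NODE_BOXES="generic/ubuntu2004 generic/ubuntu2004 generic/ubuntu2004 generic/ubuntu2004" \
E2E_GITHUB_BRANCH="master" \
vagrant up --provider=libvirt
```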
