Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

fix: use correct wasm shims names #9519

Merged
merged 2 commits into from
Mar 5, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 7 additions & 7 deletions pkg/agent/containerd/runtimes.go
Original file line number Diff line number Diff line change
Expand Up @@ -79,31 +79,31 @@ func findNvidiaContainerRuntimes(foundRuntimes runtimeConfigs) {
func findWasiRuntimes(foundRuntimes runtimeConfigs) {
potentialRuntimes := runtimeConfigs{
"lunatic": {
RuntimeType: "io.containerd.lunatic.v2",
RuntimeType: "io.containerd.lunatic.v1",
BinaryName: "containerd-shim-lunatic-v1",
},
"slight": {
RuntimeType: "io.containerd.slight.v2",
RuntimeType: "io.containerd.slight.v1",
BinaryName: "containerd-shim-slight-v1",
},
"spin": {
RuntimeType: "io.containerd.spin.v2",
BinaryName: "containerd-shim-spin-v1",
BinaryName: "containerd-shim-spin-v2",
},
"wws": {
RuntimeType: "io.containerd.wws.v2",
RuntimeType: "io.containerd.wws.v1",
BinaryName: "containerd-shim-wws-v1",
},
"wasmedge": {
RuntimeType: "io.containerd.wasmedge.v2",
RuntimeType: "io.containerd.wasmedge.v1",
BinaryName: "containerd-shim-wasmedge-v1",
},
"wasmer": {
RuntimeType: "io.containerd.wasmer.v2",
RuntimeType: "io.containerd.wasmer.v1",
BinaryName: "containerd-shim-wasmer-v1",
},
"wasmtime": {
RuntimeType: "io.containerd.wasmtime.v2",
RuntimeType: "io.containerd.wasmtime.v1",
BinaryName: "containerd-shim-wasmtime-v1",
},
}
Expand Down
18 changes: 9 additions & 9 deletions pkg/agent/containerd/runtimes_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ func Test_UnitFindContainerRuntimes(t *testing.T) {
BinaryName: "/tmp/testExecutables/crun",
},
"lunatic": {
RuntimeType: "io.containerd.lunatic.v2",
RuntimeType: "io.containerd.lunatic.v1",
BinaryName: "/tmp/testExecutables/containerd-shim-lunatic-v1",
},
},
Expand All @@ -57,39 +57,39 @@ func Test_UnitFindContainerRuntimes(t *testing.T) {
"containerd-shim-wasmtime-v1",
"containerd-shim-lunatic-v1",
"containerd-shim-slight-v1",
"containerd-shim-spin-v1",
"containerd-shim-spin-v2",
"containerd-shim-wws-v1",
"containerd-shim-wasmedge-v1",
"containerd-shim-wasmer-v1",
},
},
want: runtimeConfigs{
"wasmtime": {
RuntimeType: "io.containerd.wasmtime.v2",
RuntimeType: "io.containerd.wasmtime.v1",
BinaryName: "/tmp/testExecutables/containerd-shim-wasmtime-v1",
},
"lunatic": {
RuntimeType: "io.containerd.lunatic.v2",
RuntimeType: "io.containerd.lunatic.v1",
BinaryName: "/tmp/testExecutables/containerd-shim-lunatic-v1",
},
"slight": {
RuntimeType: "io.containerd.slight.v2",
RuntimeType: "io.containerd.slight.v1",
BinaryName: "/tmp/testExecutables/containerd-shim-slight-v1",
},
"spin": {
RuntimeType: "io.containerd.spin.v2",
BinaryName: "/tmp/testExecutables/containerd-shim-spin-v1",
BinaryName: "/tmp/testExecutables/containerd-shim-spin-v2",
},
"wws": {
RuntimeType: "io.containerd.wws.v2",
RuntimeType: "io.containerd.wws.v1",
BinaryName: "/tmp/testExecutables/containerd-shim-wws-v1",
},
"wasmedge": {
RuntimeType: "io.containerd.wasmedge.v2",
RuntimeType: "io.containerd.wasmedge.v1",
BinaryName: "/tmp/testExecutables/containerd-shim-wasmedge-v1",
},
"wasmer": {
RuntimeType: "io.containerd.wasmer.v2",
RuntimeType: "io.containerd.wasmer.v1",
BinaryName: "/tmp/testExecutables/containerd-shim-wasmer-v1",
},
},
Expand Down
126 changes: 126 additions & 0 deletions tests/e2e/amd64_resource_files/wasm-workloads.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,126 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: wasm-slight
  namespace: default
spec:
  replicas: 1
  selector:
    matchLabels:
      app: wasm-slight
  template:
    metadata:
      labels:
        app: wasm-slight
    spec:
      runtimeClassName: slight
      containers:
        - name: slight-hello
          image: ghcr.io/deislabs/containerd-wasm-shims/examples/slight-rust-hello:v0.9.1
          command: ["/"]
          resources:
            limits:
              cpu: 100m
              memory: 128Mi
            requests:
              cpu: 100m
              memory: 128Mi
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: wasm-spin
  namespace: default
spec:
  replicas: 1
  selector:
    matchLabels:
      app: wasm-spin
  template:
    metadata:
      labels:
        app: wasm-spin
    spec:
      runtimeClassName: spin
      containers:
        - name: spin-hello
          image: ghcr.io/deislabs/containerd-wasm-shims/examples/spin-rust-hello:v0.11.1
          command: ["/"]
          resources:
            limits:
              cpu: 100m
              memory: 128Mi
            requests:
              cpu: 100m
              memory: 128Mi
---
# create a traefik middleware
apiVersion: traefik.containo.us/v1alpha1
kind: Middleware
metadata:
  name: strip-prefix
  namespace: default
spec:
  stripPrefix:
    forceSlash: false
    prefixes:
      - /spin
      - /slight
---
# define the slight service
apiVersion: v1
kind: Service
metadata:
  name: wasm-slight
  namespace: default
spec:
  ports:
    - protocol: TCP
      port: 80
      targetPort: 3000
  selector:
    app: wasm-slight
---
# define the spin service
apiVersion: v1
kind: Service
metadata:
  name: wasm-spin
  namespace: default
spec:
  ports:
    - protocol: TCP
      port: 80
      targetPort: 80
  selector:
    app: wasm-spin
---
# define a single ingress, that exposes both services
# using a path route
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ingress
  namespace: default
  annotations:
    ingress.kubernetes.io/ssl-redirect: "false"
    kubernetes.io/ingress.class: traefik
    traefik.ingress.kubernetes.io/router.middlewares: default-strip-prefix@kubernetescrd
spec:
  rules:
    - http:
        paths:
          - path: /slight
            pathType: Prefix
            backend:
              service:
                name: wasm-slight
                port:
                  number: 80
          - path: /spin
            pathType: Prefix
            backend:
              service:
                name: wasm-spin
                port:
                  number: 80
96 changes: 96 additions & 0 deletions tests/e2e/wasm/Vagrantfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,96 @@
# Run VM provisioning serially; parallel bring-up is not needed for this suite.
ENV['VAGRANT_NO_PARALLEL'] = 'no'

# Node topology and k3s install source, all overridable via E2E_* env vars.
NODE_ROLES = ENV['E2E_NODE_ROLES'] || %w[server-0]
NODE_BOXES = ENV['E2E_NODE_BOXES'] || %w[generic/ubuntu2004]
GITHUB_BRANCH = ENV['E2E_GITHUB_BRANCH'] || 'master'
RELEASE_VERSION = ENV['E2E_RELEASE_VERSION'] || ''
GOCOVER = ENV['E2E_GOCOVER'] || ''
NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i
NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 2048).to_i
# Virtualbox >= 6.1.28 require `/etc/vbox/network.conf` for expanded private networks
NETWORK_PREFIX = '10.10.10'
install_type = ''

# Release artifacts for the run-wasi containerd shims installed on every node.
SPIN_DOWNLOAD_URL = 'https://github.com/deislabs/containerd-wasm-shims/releases/download/v0.11.1/containerd-wasm-shims-v2-spin-linux-x86_64.tar.gz'
SLIGHT_DOWNLOAD_URL = 'https://github.com/deislabs/containerd-wasm-shims/releases/download/v0.11.1/containerd-wasm-shims-v1-slight-linux-x86_64.tar.gz'

# Shell snippet that fetches both shim tarballs and drops the binaries on PATH.
INSTALL_WASM_SHIMS = <<-SCRIPT
curl -fsSL -o spin.tar.gz #{SPIN_DOWNLOAD_URL}
tar xf spin.tar.gz

curl -fsSL -o slight.tar.gz #{SLIGHT_DOWNLOAD_URL}
tar xf slight.tar.gz

mv containerd-shim-* /usr/bin
rm *tar.gz
SCRIPT

# Provision a single cluster node: assign its box/hostname/private IP,
# install the run-wasi containerd shims, and — for the first server —
# install and configure k3s via the vagrant-k3s plugin.
#
# vm       - Vagrant VM configuration object for this node
# role     - node role name, e.g. "server-0"
# role_num - numeric suffix parsed from the role name
# node_num - overall node index (selects the box and the host IP octet)
def provision(vm, role, role_num, node_num)
  vm.box = NODE_BOXES[node_num]
  vm.hostname = role
  # An expanded netmask is required to allow VM<-->VM communication, virtualbox defaults to /32
  node_ip = "#{NETWORK_PREFIX}.#{100 + node_num}"
  vm.network "private_network", ip: node_ip, netmask: "255.255.255.0"

  # FIX: Dir.exists?/File.exists? were deprecated and removed in Ruby 3.2;
  # use the supported exist? predicates so this Vagrantfile runs on modern Ruby.
  # NOTE(review): scripts_location is assigned but not used in this block — confirm
  # whether a later change depends on it before removing.
  scripts_location = Dir.exist?("./scripts") ? "./scripts" : "../scripts"
  vagrant_defaults = File.exist?("./vagrantdefaults.rb") ? "./vagrantdefaults.rb" : "../vagrantdefaults.rb"
  load vagrant_defaults

  defaultOSConfigure(vm)
  addCoverageDir(vm, role, GOCOVER)
  install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)

  # Sanity-check outbound networking before attempting any downloads.
  vm.provision "shell", inline: "ping -c 2 k3s.io"
  vm.provision "Install run-wasi containerd shims", type: "shell", inline: INSTALL_WASM_SHIMS

  if role.include?("server") && role_num == 0

    dockerInstall(vm)
    vm.provision 'k3s-primary-server', type: 'k3s', run: 'once' do |k3s|
      k3s.args = "server "
      k3s.config = <<~YAML
        cluster-init: true
        node-external-ip: #{NETWORK_PREFIX}.100
        token: vagrant
        flannel-iface: eth1
      YAML
      k3s.env = %W[K3S_KUBECONFIG_MODE=0644 #{install_type}]
      k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
    end
  end

  # MicroOS applies transactional updates that require a reboot to take effect.
  if vm.box.to_s.include?("microos")
    vm.provision 'k3s-reload', type: 'reload', run: 'once'
  end
end

# Top-level Vagrant configuration: registers required plugins, sets provider
# resources, normalizes the node lists, and defines one VM per role.
Vagrant.configure("2") do |config|
  config.vagrant.plugins = ["vagrant-k3s", "vagrant-reload"]
  # Default provider is libvirt, virtualbox is only provided as a backup
  config.vm.provider "libvirt" do |v|
    v.cpus = NODE_CPUS
    v.memory = NODE_MEMORY
  end
  config.vm.provider "virtualbox" do |v|
    v.cpus = NODE_CPUS
    v.memory = NODE_MEMORY
  end

  # E2E_NODE_ROLES / E2E_NODE_BOXES may arrive from the environment as
  # space-separated strings; normalize them to arrays before iterating.
  # (Reassigning a constant emits a Ruby warning but is the established
  # pattern across these e2e Vagrantfiles.)
  if NODE_ROLES.kind_of?(String)
    NODE_ROLES = NODE_ROLES.split(" ", -1)
  end
  if NODE_BOXES.kind_of?(String)
    NODE_BOXES = NODE_BOXES.split(" ", -1)
  end

  # Must iterate on the index, vagrant does not understand iterating
  # over the node roles themselves
  NODE_ROLES.length.times do |i|
    name = NODE_ROLES[i]
    # role_num is the numeric suffix of the role name: "server-0" -> 0
    role_num = name.split("-", -1).pop.to_i
    config.vm.define name do |node|
      provision(node.vm, name, role_num, i)
    end
  end
end
Loading
Loading