diff --git a/CHANGELOG/CHANGELOG-1.16.md b/CHANGELOG/CHANGELOG-1.16.md index 4dc6ba6a9e01d..567f885f619f1 100644 --- a/CHANGELOG/CHANGELOG-1.16.md +++ b/CHANGELOG/CHANGELOG-1.16.md @@ -1,78 +1,93 @@ -- [v1.16.9](#v1169) - - [Downloads for v1.16.9](#downloads-for-v1169) - - [Client Binaries](#client-binaries) - - [Server Binaries](#server-binaries) - - [Node Binaries](#node-binaries) - - [Changelog since v1.16.8](#changelog-since-v1168) +- [v1.16.10](#v11610) + - [Downloads for v1.16.10](#downloads-for-v11610) + - [Source Code](#source-code) + - [Client binaries](#client-binaries) + - [Server binaries](#server-binaries) + - [Node binaries](#node-binaries) + - [Changelog since v1.16.9](#changelog-since-v1169) - [Changes by Kind](#changes-by-kind) - - [Feature](#feature) + - [API Change](#api-change) - [Bug or Regression](#bug-or-regression) - [Other (Cleanup or Flake)](#other-cleanup-or-flake) -- [v1.16.8](#v1168) - - [Downloads for v1.16.8](#downloads-for-v1168) + - [Dependencies](#dependencies) + - [Added](#added) + - [Changed](#changed) + - [Removed](#removed) +- [v1.16.9](#v1169) + - [Downloads for v1.16.9](#downloads-for-v1169) - [Client Binaries](#client-binaries-1) - [Server Binaries](#server-binaries-1) - [Node Binaries](#node-binaries-1) - - [Changelog since v1.16.7](#changelog-since-v1167) + - [Changelog since v1.16.8](#changelog-since-v1168) - [Changes by Kind](#changes-by-kind-1) - - [API Change](#api-change) - - [Other (Bug, Cleanup or Flake)](#other-bug-cleanup-or-flake) -- [v1.16.7](#v1167) - - [Downloads for v1.16.7](#downloads-for-v1167) + - [Feature](#feature) + - [Bug or Regression](#bug-or-regression-1) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake-1) +- [v1.16.8](#v1168) + - [Downloads for v1.16.8](#downloads-for-v1168) - [Client Binaries](#client-binaries-2) - [Server Binaries](#server-binaries-2) - [Node Binaries](#node-binaries-2) - - [Changelog since v1.16.6](#changelog-since-v1166) + - [Changelog since v1.16.7](#changelog-since-v1167) - [Changes by Kind](#changes-by-kind-2) - - [Other (Bug, Cleanup or Flake)](#other-bug-cleanup-or-flake-1) -- [v1.16.6](#v1166) - - [Downloads for v1.16.6](#downloads-for-v1166) + - [API Change](#api-change-1) + - [Other (Bug, Cleanup or Flake)](#other-bug-cleanup-or-flake) +- [v1.16.7](#v1167) + - [Downloads for v1.16.7](#downloads-for-v1167) - [Client Binaries](#client-binaries-3) - [Server Binaries](#server-binaries-3) - [Node Binaries](#node-binaries-3) - - [Changelog since v1.16.5](#changelog-since-v1165) -- [v1.16.5](#v1165) - - [Downloads for v1.16.5](#downloads-for-v1165) + - [Changelog since v1.16.6](#changelog-since-v1166) + - [Changes by Kind](#changes-by-kind-3) + - [Other (Bug, Cleanup or Flake)](#other-bug-cleanup-or-flake-1) +- [v1.16.6](#v1166) + - [Downloads for v1.16.6](#downloads-for-v1166) - [Client Binaries](#client-binaries-4) - [Server Binaries](#server-binaries-4) - [Node Binaries](#node-binaries-4) + - [Changelog since v1.16.5](#changelog-since-v1165) +- [v1.16.5](#v1165) + - [Downloads for v1.16.5](#downloads-for-v1165) + - [Client Binaries](#client-binaries-5) + - [Server Binaries](#server-binaries-5) + - [Node Binaries](#node-binaries-5) - [Changelog since v1.16.4](#changelog-since-v1164) - [Other notable changes](#other-notable-changes) - [v1.16.4](#v1164) - [Downloads for v1.16.4](#downloads-for-v1164) - - [Client Binaries](#client-binaries-5) - - [Server Binaries](#server-binaries-5) - - [Node Binaries](#node-binaries-5) + - [Client Binaries](#client-binaries-6) + - [Server 
Binaries](#server-binaries-6) + - [Node Binaries](#node-binaries-6) - [Changelog since v1.16.3](#changelog-since-v1163) - [Other notable changes](#other-notable-changes-1) - [v1.16.3](#v1163) - [Downloads for v1.16.3](#downloads-for-v1163) - - [Client Binaries](#client-binaries-6) - - [Server Binaries](#server-binaries-6) - - [Node Binaries](#node-binaries-6) + - [Client Binaries](#client-binaries-7) + - [Server Binaries](#server-binaries-7) + - [Node Binaries](#node-binaries-7) - [Changelog since v1.16.2](#changelog-since-v1162) - [Other notable changes](#other-notable-changes-2) - [v1.16.2](#v1162) - [Downloads for v1.16.2](#downloads-for-v1162) - - [Client Binaries](#client-binaries-7) - - [Server Binaries](#server-binaries-7) - - [Node Binaries](#node-binaries-7) + - [Client Binaries](#client-binaries-8) + - [Server Binaries](#server-binaries-8) + - [Node Binaries](#node-binaries-8) - [Changelog since v1.16.1](#changelog-since-v1161) - [Other notable changes](#other-notable-changes-3) - [v1.16.1](#v1161) - [Downloads for v1.16.1](#downloads-for-v1161) - - [Client Binaries](#client-binaries-8) - - [Server Binaries](#server-binaries-8) - - [Node Binaries](#node-binaries-8) + - [Client Binaries](#client-binaries-9) + - [Server Binaries](#server-binaries-9) + - [Node Binaries](#node-binaries-9) - [Changelog since v1.16.0](#changelog-since-v1160) - [Other notable changes](#other-notable-changes-4) - [v1.16.0](#v1160) - [Downloads for v1.16.0](#downloads-for-v1160) - - [Client Binaries](#client-binaries-9) - - [Server Binaries](#server-binaries-9) - - [Node Binaries](#node-binaries-9) + - [Client Binaries](#client-binaries-10) + - [Server Binaries](#server-binaries-10) + - [Node Binaries](#node-binaries-10) - [Kubernetes v1.16.0 Release Notes](#kubernetes-v1160-release-notes) - [What’s New (Major Themes)](#what’s-new-major-themes) - [Additional Notable Feature Updates](#additional-notable-feature-updates) @@ -106,70 +121,156 @@ - [Storage](#storage-1) - [Testing](#testing) - [Windows](#windows) - - [Dependencies](#dependencies) - - [Changed](#changed) + - [Dependencies](#dependencies-1) + - [Changed](#changed-1) - [Unchanged](#unchanged) - - [Removed](#removed) + - [Removed](#removed-1) - [Detailed go Dependency Changes](#detailed-go-dependency-changes) - - [Added](#added) - - [Changed](#changed-1) - - [Removed](#removed-1) + - [Added](#added-1) + - [Changed](#changed-2) + - [Removed](#removed-2) - [v1.16.0-rc.2](#v1160-rc2) - [Downloads for v1.16.0-rc.2](#downloads-for-v1160-rc2) - - [Client Binaries](#client-binaries-10) - - [Server Binaries](#server-binaries-10) - - [Node Binaries](#node-binaries-10) + - [Client Binaries](#client-binaries-11) + - [Server Binaries](#server-binaries-11) + - [Node Binaries](#node-binaries-11) - [Changelog since v1.16.0-rc.1](#changelog-since-v1160-rc1) - [Other notable changes](#other-notable-changes-6) - [v1.16.0-rc.1](#v1160-rc1) - [Downloads for v1.16.0-rc.1](#downloads-for-v1160-rc1) - - [Client Binaries](#client-binaries-11) - - [Server Binaries](#server-binaries-11) - - [Node Binaries](#node-binaries-11) + - [Client Binaries](#client-binaries-12) + - [Server Binaries](#server-binaries-12) + - [Node Binaries](#node-binaries-12) - [Changelog since v1.16.0-beta.2](#changelog-since-v1160-beta2) - [Other notable changes](#other-notable-changes-7) - [v1.16.0-beta.2](#v1160-beta2) - [Downloads for v1.16.0-beta.2](#downloads-for-v1160-beta2) - - [Client Binaries](#client-binaries-12) - - [Server Binaries](#server-binaries-12) - - [Node 
Binaries](#node-binaries-12) + - [Client Binaries](#client-binaries-13) + - [Server Binaries](#server-binaries-13) + - [Node Binaries](#node-binaries-13) - [Changelog since v1.16.0-beta.1](#changelog-since-v1160-beta1) - [Other notable changes](#other-notable-changes-8) - [v1.16.0-beta.1](#v1160-beta1) - [Downloads for v1.16.0-beta.1](#downloads-for-v1160-beta1) - - [Client Binaries](#client-binaries-13) - - [Server Binaries](#server-binaries-13) - - [Node Binaries](#node-binaries-13) + - [Client Binaries](#client-binaries-14) + - [Server Binaries](#server-binaries-14) + - [Node Binaries](#node-binaries-14) - [Changelog since v1.16.0-alpha.3](#changelog-since-v1160-alpha3) - [Action Required](#action-required) - [Other notable changes](#other-notable-changes-9) - [v1.16.0-alpha.3](#v1160-alpha3) - [Downloads for v1.16.0-alpha.3](#downloads-for-v1160-alpha3) - - [Client Binaries](#client-binaries-14) - - [Server Binaries](#server-binaries-14) - - [Node Binaries](#node-binaries-14) + - [Client Binaries](#client-binaries-15) + - [Server Binaries](#server-binaries-15) + - [Node Binaries](#node-binaries-15) - [Changelog since v1.16.0-alpha.2](#changelog-since-v1160-alpha2) - [Action Required](#action-required-1) - [Other notable changes](#other-notable-changes-10) - [v1.16.0-alpha.2](#v1160-alpha2) - [Downloads for v1.16.0-alpha.2](#downloads-for-v1160-alpha2) - - [Client Binaries](#client-binaries-15) - - [Server Binaries](#server-binaries-15) - - [Node Binaries](#node-binaries-15) + - [Client Binaries](#client-binaries-16) + - [Server Binaries](#server-binaries-16) + - [Node Binaries](#node-binaries-16) - [Changelog since v1.16.0-alpha.1](#changelog-since-v1160-alpha1) - [Action Required](#action-required-2) - [Other notable changes](#other-notable-changes-11) - [v1.16.0-alpha.1](#v1160-alpha1) - [Downloads for v1.16.0-alpha.1](#downloads-for-v1160-alpha1) - - [Client Binaries](#client-binaries-16) - - [Server Binaries](#server-binaries-16) - - [Node Binaries](#node-binaries-16) + - [Client Binaries](#client-binaries-17) + - [Server Binaries](#server-binaries-17) + - [Node Binaries](#node-binaries-17) - [Changelog since v1.15.0](#changelog-since-v1150) - [Action Required](#action-required-3) - [Other notable changes](#other-notable-changes-12) +# v1.16.10 + + +## Downloads for v1.16.10 + +### Source Code + +filename | sha512 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.16.10/kubernetes.tar.gz) | 5925579630fa2c3f78c1e17b4c2aca3e23cad5db7721b34b9e8d8eb8a443d4ea4cb87369d9d254dfd35603a1f12825a1167ccd2b71e1e1f3eb7676ad4a143276 +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.16.10/kubernetes-src.tar.gz) | 5d265121125e940ea43a54df7ef6ed072598d5d4dcfcbf7173ad252d016869db938c50865aaff31ef7ebd0399fa0de87b4248285b3c7f1abdf813cda0df585b2 + +### Client binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.16.10/kubernetes-client-darwin-386.tar.gz) | da77e3fc2bdcc5c6c6dd8c92c2f8fc23ea2974cb36e08316830631132005c03513296179b1ba5e77d9dc3346ce273207f9ae4ae822b13b1a361f6543a4df4bcc +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.16.10/kubernetes-client-darwin-amd64.tar.gz) | f56f42a525e723fdcb203d7671fc8586c45a9987e8f4ffbe9467beefe0af12f8158bba1257392f21cca3cf75c9f8db803d644135da5e2fb2cc74ced858c9f294 +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.16.10/kubernetes-client-linux-386.tar.gz) | 
10241604f0fec126f696f33963ec607abf378b44524f3dcc3ab26cfcad894d1ab1f3fa06af2ee6619ac3aa43eef6edb46bb7cdb3e5c9e813009a5373b99e1598 +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.16.10/kubernetes-client-linux-amd64.tar.gz) | 455d8bd8881aa996dd35969b2448fa54ab15650ce2d58c19bf89ff8e11f287beb136ba64ca62d64c958a1227dfee1e94408941258f0b9421ef06bb48a09bbcd2 +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.16.10/kubernetes-client-linux-arm.tar.gz) | a30b1840f7ca2f13468a142f69b1806cf9950d67f5c42cb30972c23704c4c66d47c16245096ddb3245ab9d7a002ae37a944822ef4d8d7d21a253f8cf59d861c8 +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.16.10/kubernetes-client-linux-arm64.tar.gz) | 1247c48d1abe3d04d8ecc08c34bc627961cc2a830893a039366e36da66ce96431d2d9b8ebb0e4893260195ba702f8a5e9935ac5a043ea0a9b0e34b64682f394b +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.16.10/kubernetes-client-linux-ppc64le.tar.gz) | c793de5dbe35708ba7992cae395e3c70e48e5ecb57bba73d87496dbccde216b777f60ff41ff10d565e06f48bf18b72f97df12446722860a72a55c46dcdf8bd7e +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.16.10/kubernetes-client-linux-s390x.tar.gz) | a71420474b115e5114628b09744a1929163931550d5fbfffbc6c12c7531c6e9769ab5125148a72d151fe6392c8c96d6a4736348a67fa02e66ac8afac6defe13b +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.16.10/kubernetes-client-windows-386.tar.gz) | 42a44700becf4556473498c95561aced6704a49800bf1b96a776607903eca97f77aa208e29d99c963a7e00a8af72540c45572f1fad59d74f3736b180e60f7411 +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.16.10/kubernetes-client-windows-amd64.tar.gz) | 0fffcf5012ae9495396c64a8b3bea3389456ba5844d44b5b8ae619752011101864eb7ed6ea6fe2fedeb8a07e36a097cd84b79a62b9004e457105f0c0895f7be8 + +### Server binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.16.10/kubernetes-server-linux-amd64.tar.gz) | bb2c68409878c1037f8d2924b010f6e13cb28969046faf75a5e9e16d90e8167223f9ecab0866ba1c3998360d1da87863d3626dada226b153733f39e6d53e710c +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.16.10/kubernetes-server-linux-arm.tar.gz) | 42dddd7699b77c12690b525dd9c0643e529589a84628c3fa6b7f8ffad064dc1fb93cd3e37aa606d113f4c18833ad6e404af86f0dba5b92238c2410f9bdeb16fa +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.16.10/kubernetes-server-linux-arm64.tar.gz) | 0c0e9b484576559eb2d894aef9b705472d31b459123ca4dec61fa0fd9a6d5ddb844003dfbbaa596e3d60fbabfdbd57424053a89c0d9969d181e5e9b820c49d86 +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.16.10/kubernetes-server-linux-ppc64le.tar.gz) | 9a6c12f1c6a442d59f8eb2affa3c980e35c0e85a70ec97d565c660c4bf041257d98f8588280a1e4b28f9a2589c5f8f5ef105030a29271c4bd0faf67b79eb37b6 +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.16.10/kubernetes-server-linux-s390x.tar.gz) | f4a272179806dc174177c4ae860a8f8dd67743ed1eedab2720761a008e8bf15646c39e0a8a176e980a2e43d2c8068f78f19c35295ceb097e40eea904d5e6c253 + +### Node binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.16.10/kubernetes-node-linux-amd64.tar.gz) | bcd8abebedbb20f93259ccfddefe4f7f5583d2572a8bdb011715b737c88927a89f90923856e43ed6b6b9d6fdd7ce6f348de5e632f63abe16ee66828a33842844 +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.16.10/kubernetes-node-linux-arm.tar.gz) | 
9089881d92640de936c07b25ed398e338d95a0b684891377835341e47f868fa0f3337ac8e93307937d2ccdb694da72fc6ca275f1bb2803ae55fe06c7a1d47cd2 +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.16.10/kubernetes-node-linux-arm64.tar.gz) | b5844cfcbdc13888c3150348252cdf38c39a19fc0e69b1bff42178e10b608e8500e7e527a285c20be0e9ca1ea785c7ff9a198c9e151e7e9ebed694fc7780214c +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.16.10/kubernetes-node-linux-ppc64le.tar.gz) | bda2e59f704cb314397302a3365f8a2dcbd7663277ede11fe5f69c346ab5ef26103542849751a3c8cd2a72c9819cbc4900b52a0f941d47fb7d774caaa94fda24 +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.16.10/kubernetes-node-linux-s390x.tar.gz) | 0574ccf4677f8333b6a64c91fcfc4e3d037eec05f3983df647d8d5505ef57a3c6d41e93fbb35ad1f228b3673ee0d6084610054e397714ba0bd86c949455122d9 +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.16.10/kubernetes-node-windows-amd64.tar.gz) | e5a5bfb79f1e45f13da3dfaa13c44264e099d08c535d6a9c63819685be186f8a84a518468720cd56dc1434aec10d40aa4c929d62fdd7f43b749e05c7572fb2f9 + +## Changelog since v1.16.9 + +## Changes by Kind + +### API Change + - Fix a bug where sending a status update completely wipes managedFields for some types. ([#90033](https://github.com/kubernetes/kubernetes/pull/90033), [@apelisse](https://github.com/apelisse)) [SIG API Machinery and Testing] + +### Bug or Regression + - Base-images: Update to kube-cross:v1.13.9-5 ([#90966](https://github.com/kubernetes/kubernetes/pull/90966), [@justaugustus](https://github.com/justaugustus)) [SIG Release] + - Fix HPA when using init containers and CRI-O ([#90900](https://github.com/kubernetes/kubernetes/pull/90900), [@joelsmith](https://github.com/joelsmith)) [SIG Node] + - Fix: Init containers are now considered for the calculation of resource requests when scheduling ([#90455](https://github.com/kubernetes/kubernetes/pull/90455), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling] + - Fix: Azure disk dangling attach issue which would cause API throttling ([#90749](https://github.com/kubernetes/kubernetes/pull/90749), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] + - Fix: attach disk error caused by a missing item in the max count table ([#89768](https://github.com/kubernetes/kubernetes/pull/89768), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] + - Fix: support removal of nodes backed by deleted non-VMSS instances on Azure ([#91184](https://github.com/kubernetes/kubernetes/pull/91184), [@bpineau](https://github.com/bpineau)) [SIG Cloud Provider] + - Fixes a bug when defining a default value for a replicas field in a custom resource definition that has the scale subresource enabled ([#90022](https://github.com/kubernetes/kubernetes/pull/90022), [@liggitt](https://github.com/liggitt)) [SIG API Machinery, CLI, Cloud Provider and Cluster Lifecycle] + - Pods that are considered for preemption and haven't started no longer produce an error log. ([#90241](https://github.com/kubernetes/kubernetes/pull/90241), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling] + - Provides a fix to allow a cluster in a private Azure cloud to authenticate to ACR in the same cloud. ([#90425](https://github.com/kubernetes/kubernetes/pull/90425), [@DavidParks8](https://github.com/DavidParks8)) [SIG Cloud Provider] + - Reduced the frequency of DescribeVolumes calls to the AWS API when attaching/detaching a volume. + Fixed "requested device X but found Y" attach error on AWS.
([#89894](https://github.com/kubernetes/kubernetes/pull/89894), [@johanneswuerbach](https://github.com/johanneswuerbach)) [SIG Cloud Provider] + - Scheduling failures due to no nodes available are now reported as unschedulable under the `schedule_attempts_total` metric. ([#90989](https://github.com/kubernetes/kubernetes/pull/90989), [@ahg-g](https://github.com/ahg-g)) [SIG Scheduling] + +### Other (Cleanup or Flake) + - base-images: Use debian-base:v2.1.0 (update to Debian Buster, includes CVE fixes) + - base-images: Use debian-iptables:v12.1.0 (update to Debian Buster, includes CVE fixes and iptables-nft/iptables-legacy wrapper) ([#90940](https://github.com/kubernetes/kubernetes/pull/90940), [@justaugustus](https://github.com/justaugustus)) [SIG API Machinery, Cluster Lifecycle and Release] + +## Dependencies + +### Added +_Nothing has changed._ + +### Changed +- k8s.io/kube-openapi: 743ec37 → 594e756 + +### Removed +_Nothing has changed._ + + + # v1.16.9 [Documentation](https://docs.k8s.io) diff --git a/build/debian-hyperkube-base/.gitignore b/build/debian-hyperkube-base/.gitignore deleted file mode 100644 index 827612c4ee91d..0000000000000 --- a/build/debian-hyperkube-base/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/cni-tars diff --git a/build/debian-hyperkube-base/Dockerfile b/build/debian-hyperkube-base/Dockerfile deleted file mode 100644 index bdb982c3cfb70..0000000000000 --- a/build/debian-hyperkube-base/Dockerfile +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright 2017 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -FROM BASEIMAGE - -# TODO(#69896): deprecate the shortened aliases in / -RUN ln -s /hyperkube /apiserver \ && ln -s /hyperkube /cloud-controller-manager \ && ln -s /hyperkube /controller-manager \ && ln -s /hyperkube /kubectl \ && ln -s /hyperkube /kubelet \ && ln -s /hyperkube /proxy \ && ln -s /hyperkube /scheduler \ && ln -s /hyperkube /usr/local/bin/cloud-controller-manager \ && ln -s /hyperkube /usr/local/bin/kube-apiserver \ && ln -s /hyperkube /usr/local/bin/kube-controller-manager \ && ln -s /hyperkube /usr/local/bin/kube-proxy \ && ln -s /hyperkube /usr/local/bin/kube-scheduler \ && ln -s /hyperkube /usr/local/bin/kubectl \ && ln -s /hyperkube /usr/local/bin/kubelet - -RUN echo CACHEBUST>/dev/null && clean-install \ bash - -# The samba-common, cifs-utils, and nfs-common packages depend on -# ucf, which itself depends on /bin/bash.
-RUN echo "dash dash/sh boolean false" | debconf-set-selections -RUN DEBIAN_FRONTEND=noninteractive dpkg-reconfigure dash - -RUN echo CACHEBUST>/dev/null && clean-install \ - ca-certificates \ - ceph-common \ - cifs-utils \ - conntrack \ - e2fsprogs \ - xfsprogs \ - ebtables \ - ethtool \ - git \ - glusterfs-client \ - iptables \ - ipset \ - jq \ - kmod \ - openssh-client \ - netbase \ - nfs-common \ - socat \ - udev \ - util-linux - -COPY cni-bin/bin /opt/cni/bin diff --git a/build/debian-hyperkube-base/Makefile b/build/debian-hyperkube-base/Makefile deleted file mode 100644 index 57728a4cc602e..0000000000000 --- a/build/debian-hyperkube-base/Makefile +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright 2017 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Build the hyperkube base image. This image is used to build the hyperkube image. -# -# Usage: -# [ARCH=amd64] [REGISTRY="staging-k8s.gcr.io"] make (build|push) - -REGISTRY?=staging-k8s.gcr.io -IMAGE?=$(REGISTRY)/debian-hyperkube-base -TAG=0.12.1 -ARCH?=amd64 -ALL_ARCH = amd64 arm arm64 ppc64le s390x -CACHEBUST?=1 - -BASEIMAGE=k8s.gcr.io/debian-base-$(ARCH):0.4.1 -CNI_VERSION=v0.7.5 - -TEMP_DIR:=$(shell mktemp -d) -CNI_TARBALL=cni-plugins-$(ARCH)-$(CNI_VERSION).tgz - -# This option is for running docker manifest command -export DOCKER_CLI_EXPERIMENTAL := enabled - -SUDO=$(if $(filter 0,$(shell id -u)),,sudo) - -.PHONY: all build push clean all-build all-push-images all-push push-manifest - -all: all-push - -sub-build-%: - $(MAKE) ARCH=$* build - -all-build: $(addprefix sub-build-,$(ALL_ARCH)) - -sub-push-image-%: - $(MAKE) ARCH=$* push - -all-push-images: $(addprefix sub-push-image-,$(ALL_ARCH)) - -all-push: all-push-images push-manifest - -push-manifest: - docker manifest create --amend $(IMAGE):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(IMAGE)\-&:$(TAG)~g") - @for arch in $(ALL_ARCH); do docker manifest annotate --arch $${arch} ${IMAGE}:${TAG} ${IMAGE}-$${arch}:${TAG}; done - docker manifest push --purge ${IMAGE}:${TAG} - -cni-tars/$(CNI_TARBALL): - mkdir -p cni-tars/ - cd cni-tars/ && curl -sSLO --retry 5 https://storage.googleapis.com/kubernetes-release/network-plugins/${CNI_TARBALL} - -clean: - rm -rf cni-tars/ - -build: cni-tars/$(CNI_TARBALL) - cp Dockerfile $(TEMP_DIR) - cd $(TEMP_DIR) && sed -i "s|BASEIMAGE|$(BASEIMAGE)|g" Dockerfile - -ifeq ($(CACHEBUST),1) - cd ${TEMP_DIR} && sed -i.back "s|CACHEBUST|$(shell uuidgen)|g" Dockerfile -endif - - mkdir -p ${TEMP_DIR}/cni-bin/bin - tar -xz -C ${TEMP_DIR}/cni-bin/bin -f "cni-tars/${CNI_TARBALL}" - -ifneq ($(ARCH),amd64) - # Register /usr/bin/qemu-ARCH-static as the handler for non-x86 binaries in the kernel - $(SUDO) ../../third_party/multiarch/qemu-user-static/register/register.sh --reset -endif - docker build --pull -t $(IMAGE)-$(ARCH):$(TAG) $(TEMP_DIR) - rm -rf $(TEMP_DIR) - -push: build - docker push $(IMAGE)-$(ARCH):$(TAG) diff --git a/build/debian-hyperkube-base/OWNERS b/build/debian-hyperkube-base/OWNERS deleted file mode 100644 index 
de80d6debf794..0000000000000 --- a/build/debian-hyperkube-base/OWNERS +++ /dev/null @@ -1,12 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -reviewers: - - BenTheElder - - mkumatag - - tallclair -approvers: - - BenTheElder - - mkumatag - - tallclair -labels: -- sig/release diff --git a/build/debian-hyperkube-base/README.md b/build/debian-hyperkube-base/README.md deleted file mode 100644 index b37c04ddf2368..0000000000000 --- a/build/debian-hyperkube-base/README.md +++ /dev/null @@ -1,25 +0,0 @@ -### debian-hyperkube-base - -Serves as the base image for `k8s.gcr.io/hyperkube-${ARCH}` -images. - -This image is compiled for multiple architectures. - -#### How to release - -If you're editing the Dockerfile or some other thing, please bump the `TAG` in the Makefile. - -```console -# Build and push images for all the architectures -$ make all-push -# ---> staging-k8s.gcr.io/debian-hyperkube-base-amd64:TAG -# ---> staging-k8s.gcr.io/debian-hyperkube-base-arm:TAG -# ---> staging-k8s.gcr.io/debian-hyperkube-base-arm64:TAG -# ---> staging-k8s.gcr.io/debian-hyperkube-base-ppc64le:TAG -# ---> staging-k8s.gcr.io/debian-hyperkube-base-s390x:TAG -``` - -If you don't want to push the images, run `make all-build` instead - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/build/debian-hyperkube-base/README.md?pixel)]() diff --git a/build/dependencies.yaml b/build/dependencies.yaml index 9c1d9d1b05c3a..1843569b65fc3 100644 --- a/build/dependencies.yaml +++ b/build/dependencies.yaml @@ -1,9 +1,7 @@ dependencies: - name: "cni" - version: 0.7.5 + version: 0.8.6 refPaths: - - path: build/debian-hyperkube-base/Makefile - match: CNI_VERSION= - path: build/rpms/kubeadm.spec match: kubernetes-cni - path: build/rpms/kubelet.spec @@ -93,6 +91,14 @@ dependencies: - path: cluster/images/etcd-empty-dir-cleanup/Dockerfile match: us\.gcr\.io\/k8s-artifacts-prod\/build-image\/debian-base:v\d+\.\d+\.\d+ + - name: "k8s.gcr.io/debian-hyperkube-base: dependents" + version: 1.0.0 + refPaths: + - path: build/workspace.bzl + match: tag = + - path: cluster/images/hyperkube/Makefile + match: BASEIMAGE\?\=us\.gcr\.io\/k8s-artifacts-prod\/build-image\/debian-hyperkube-base-\$\(ARCH\):v\d+\.\d+\.\d+ + - name: "k8s.gcr.io/debian-iptables: dependents" version: 12.1.0 refPaths: diff --git a/build/rpms/kubeadm.spec b/build/rpms/kubeadm.spec index 6c25ee992f424..a57c5851d6e60 100644 --- a/build/rpms/kubeadm.spec +++ b/build/rpms/kubeadm.spec @@ -5,7 +5,7 @@ License: ASL 2.0 Summary: Container Cluster Manager - Kubernetes Cluster Bootstrapping Tool Requires: kubelet >= 1.8.0 Requires: kubectl >= 1.8.0 -Requires: kubernetes-cni >= 0.7.5 +Requires: kubernetes-cni >= 0.8.6 Requires: cri-tools >= 1.11.0 URL: https://kubernetes.io diff --git a/build/rpms/kubelet.spec b/build/rpms/kubelet.spec index 2603c4b07c813..df6618d1f459b 100644 --- a/build/rpms/kubelet.spec +++ b/build/rpms/kubelet.spec @@ -11,7 +11,7 @@ Requires: ebtables Requires: ethtool Requires: iproute Requires: iptables >= 1.4.21 -Requires: kubernetes-cni >= 0.7.5 +Requires: kubernetes-cni >= 0.8.6 Requires: socat Requires: util-linux diff --git a/build/workspace.bzl b/build/workspace.bzl index bef0bb2fd7a81..399e5104e9978 100644 --- a/build/workspace.bzl +++ b/build/workspace.bzl @@ -17,13 +17,13 @@ load("//build:workspace_mirror.bzl", "mirror") load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive", "http_file") load("@io_bazel_rules_docker//container:container.bzl", "container_pull") -CNI_VERSION = "0.7.5" +CNI_VERSION = "0.8.6" 
_CNI_TARBALL_ARCH_SHA256 = { - "amd64": "3ca15c0a18ee830520cf3a95408be826cbd255a1535a38e0be9608b25ad8bf64", - "arm": "0eb4a528b5b2e4ce23ebc96e41b2f5280d5a64d41eec8dd8b16c3d66aaa0f6b8", - "arm64": "7fec91af78e9548df306f0ec43bea527c8c10cc3a9682c33e971c8522a7fcded", - "ppc64le": "9164a26ed8dd398b2fe3b15d9d456271dfa59aa537528d10572ea9fa2cef7679", - "s390x": "415cdcf02c65c22f5b7e55b0ab61208a10f2b95a0c8310176c771d07a9f448cf", + "amd64": "994fbfcdbb2eedcfa87e48d8edb9bb365f4e2747a7e47658482556c12fd9b2f5", + "arm": "28e61b5847265135dc1ca397bf94322ecce4acab5c79cc7d360ca3f6a655bdb7", + "arm64": "43fbf750c5eccb10accffeeb092693c32b236fb25d919cf058c91a677822c999", + "ppc64le": "61d6c6c15d3e4fa3eb85d128c9c0ff2658f38e59047ae359be47d193c673e116", + "s390x": "ca126a3bd2cd8dff1c7bbfc3c69933b284c4e77614391c7e1f74b0851fc3b289", } CRI_TOOLS_VERSION = "1.14.0" @@ -56,7 +56,7 @@ def cni_tarballs(): name = "kubernetes_cni_%s" % arch, downloaded_file_path = "kubernetes_cni.tgz", sha256 = sha, - urls = mirror("https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-%s-v%s.tgz" % (arch, CNI_VERSION)), + urls = ["https://storage.googleapis.com/k8s-artifacts-cni/release/v%s/cni-plugins-linux-%s-v%s.tgz" % (CNI_VERSION, arch, CNI_VERSION)], ) def cri_tarballs(): @@ -96,13 +96,18 @@ _DEBIAN_IPTABLES_DIGEST = { "s390x": "sha256:1b91a2788750552913377bf1bc99a095544dfb523d80a55674003c974c8e0905", } +# Use skopeo to find these values: https://github.com/containers/skopeo +# +# Example +# Manifest: skopeo inspect docker://gcr.io/k8s-staging-build-image/debian-hyperkube-base:v1.0.0 +# Arches: skopeo inspect --raw docker://gcr.io/k8s-staging-build-image/debian-hyperkube-base:v1.0.0 _DEBIAN_HYPERKUBE_BASE_DIGEST = { - "manifest": "sha256:8cabe02be6e86685d8860b7ace7c7addc9591a339728703027a4854677f1c772", - "amd64": "sha256:5d4ea2fb5fbe9a9a9da74f67cf2faefc881968bc39f2ac5d62d9167e575812a1", - "arm": "sha256:73260814af61522ff6aa48291df457d3bb0a91c4bf72e7cfa51fbaf03eb65fae", - "arm64": "sha256:78eeb1a31eef7c16f954444d64636d939d89307e752964ad6d9d06966c722da3", - "ppc64le": "sha256:92857d647abe8d9c7b4d7160cd5699112afc12fde369082a8ed00688b17928a9", - "s390x": "sha256:c11d74fa0538c67238576c247bfaddf95ebaa90cd03cb4d2f2ac3c6ebe0441e2", + "manifest": "sha256:8c8d854d868fb08352f73dda94f9e0b998c7318b48ddc587a355d0cbaf687f14", + "amd64": "sha256:73a8cb2bfd6707c8ed70c252e97bdccad8bc265a9a585db12b8e3dac50d6cd2a", + "arm": "sha256:aee7c2958c6e0de896995e8b04c8173fd0a7bbb16cddb1f4668e3ae010b8c786", + "arm64": "sha256:a74fc6d690e5c5e393fd509f50d7203fa5cd19bfbb127d4a3a996fd1ebcf35c4", + "ppc64le": "sha256:54afd9a85d6ecbe0792496f36012e7902601fb8084347cdc195c8b0561da39a3", + "s390x": "sha256:405a94e6b82f2eb89c773e0e9f6105eb73cde5605d4508ae36a150d922fe1c66", } def _digest(d, arch): @@ -137,9 +142,10 @@ def debian_image_dependencies(): name = "debian-hyperkube-base-" + arch, architecture = arch, digest = _digest(_DEBIAN_HYPERKUBE_BASE_DIGEST, arch), - registry = "k8s.gcr.io", + registry = "us.gcr.io/k8s-artifacts-prod/build-image", repository = "debian-hyperkube-base", - tag = "0.12.1", # ignored, but kept here for documentation + # Ensure the digests above are updated to match a new tag + tag = "v1.0.0", # ignored, but kept here for documentation ) def etcd_tarballs(): diff --git a/cluster/gce/config-default.sh b/cluster/gce/config-default.sh index ff6ac05e9bf3b..438661b6b95cc 100755 --- a/cluster/gce/config-default.sh +++ b/cluster/gce/config-default.sh @@ -297,9 +297,9 @@ 
NODE_PROBLEM_DETECTOR_TAR_HASH="${NODE_PROBLEM_DETECTOR_TAR_HASH:-}" NODE_PROBLEM_DETECTOR_RELEASE_PATH="${NODE_PROBLEM_DETECTOR_RELEASE_PATH:-}" NODE_PROBLEM_DETECTOR_CUSTOM_FLAGS="${NODE_PROBLEM_DETECTOR_CUSTOM_FLAGS:-}" -CNI_STORAGE_PATH="${CNI_STORAGE_PATH:-https://storage.googleapis.com/kubernetes-release/network-plugins}" -CNI_VERSION="${CNI_VERSION:-}" -CNI_SHA1="${CNI_SHA1:-}" +CNI_SHA1=${CNI_SHA1:-} +CNI_TAR_PREFIX=${CNI_TAR_PREFIX:-cni-plugins-linux-amd64-} +CNI_STORAGE_URL_BASE=${CNI_STORAGE_URL_BASE:-https://storage.googleapis.com/k8s-artifacts-cni/release} # Optional: Create autoscaler for cluster's nodes. ENABLE_CLUSTER_AUTOSCALER="${KUBE_ENABLE_CLUSTER_AUTOSCALER:-false}" diff --git a/cluster/gce/config-test.sh b/cluster/gce/config-test.sh index 56edd167a7802..f92c424d0ec43 100755 --- a/cluster/gce/config-test.sh +++ b/cluster/gce/config-test.sh @@ -328,9 +328,9 @@ NODE_PROBLEM_DETECTOR_TAR_HASH="${NODE_PROBLEM_DETECTOR_TAR_HASH:-}" NODE_PROBLEM_DETECTOR_RELEASE_PATH="${NODE_PROBLEM_DETECTOR_RELEASE_PATH:-}" NODE_PROBLEM_DETECTOR_CUSTOM_FLAGS="${NODE_PROBLEM_DETECTOR_CUSTOM_FLAGS:-}" -CNI_STORAGE_PATH="${CNI_STORAGE_PATH:-https://storage.googleapis.com/kubernetes-release/network-plugins}" -CNI_VERSION="${CNI_VERSION:-}" -CNI_SHA1="${CNI_SHA1:-}" +CNI_SHA1=${CNI_SHA1:-} +CNI_TAR_PREFIX=${CNI_TAR_PREFIX:-cni-plugins-linux-amd64-} +CNI_STORAGE_URL_BASE=${CNI_STORAGE_URL_BASE:-https://storage.googleapis.com/k8s-artifacts-cni/release} # Optional: Create autoscaler for cluster's nodes. ENABLE_CLUSTER_AUTOSCALER="${KUBE_ENABLE_CLUSTER_AUTOSCALER:-false}" diff --git a/cluster/gce/gci/configure.sh b/cluster/gce/gci/configure.sh index d3fbfe3c95322..d8a4c124ce84e 100644 --- a/cluster/gce/gci/configure.sh +++ b/cluster/gce/gci/configure.sh @@ -24,8 +24,8 @@ set -o nounset set -o pipefail ### Hardcoded constants -DEFAULT_CNI_VERSION="v0.7.5" -DEFAULT_CNI_SHA1="52e9d2de8a5f927307d9397308735658ee44ab8d" +DEFAULT_CNI_VERSION="v0.8.6" +DEFAULT_CNI_SHA1="a31251105250279fe57b4474d91d2db1d4d48b5a" DEFAULT_NPD_VERSION="v0.7.1" DEFAULT_NPD_SHA1="a9cae965973d586bf5206ad4fe5aae07e6bfd154" DEFAULT_CRICTL_VERSION="v1.14.0" @@ -236,19 +236,23 @@ function install-node-problem-detector { function install-cni-binaries { if [[ -n "${CNI_VERSION:-}" ]]; then - local -r cni_tar="cni-plugins-amd64-${CNI_VERSION}.tgz" + local -r cni_version="${CNI_VERSION}" local -r cni_sha1="${CNI_SHA1}" else - local -r cni_tar="cni-plugins-amd64-${DEFAULT_CNI_VERSION}.tgz" + local -r cni_version="${DEFAULT_CNI_VERSION}" local -r cni_sha1="${DEFAULT_CNI_SHA1}" fi + + local -r cni_tar="${CNI_TAR_PREFIX}${cni_version}.tgz" + local -r cni_url="${CNI_STORAGE_URL_BASE}/${cni_version}/${cni_tar}" + if is-preloaded "${cni_tar}" "${cni_sha1}"; then echo "${cni_tar} is preloaded." 
return fi echo "Downloading cni binaries" - download-or-bust "${cni_sha1}" "${CNI_STORAGE_PATH}/${cni_tar}" + download-or-bust "${cni_sha1}" "${cni_url}" local -r cni_dir="${KUBE_HOME}/cni" mkdir -p "${cni_dir}/bin" tar xzf "${KUBE_HOME}/${cni_tar}" -C "${cni_dir}/bin" --overwrite diff --git a/cluster/gce/util.sh b/cluster/gce/util.sh index 460e24d677240..2928b17fc9993 100755 --- a/cluster/gce/util.sh +++ b/cluster/gce/util.sh @@ -1140,7 +1140,8 @@ NODE_PROBLEM_DETECTOR_VERSION: $(yaml-quote ${NODE_PROBLEM_DETECTOR_VERSION:-}) NODE_PROBLEM_DETECTOR_TAR_HASH: $(yaml-quote ${NODE_PROBLEM_DETECTOR_TAR_HASH:-}) NODE_PROBLEM_DETECTOR_RELEASE_PATH: $(yaml-quote ${NODE_PROBLEM_DETECTOR_RELEASE_PATH:-}) NODE_PROBLEM_DETECTOR_CUSTOM_FLAGS: $(yaml-quote ${NODE_PROBLEM_DETECTOR_CUSTOM_FLAGS:-}) -CNI_STORAGE_PATH: $(yaml-quote ${CNI_STORAGE_PATH:-}) +CNI_STORAGE_URL_BASE: $(yaml-quote ${CNI_STORAGE_URL_BASE:-}) +CNI_TAR_PREFIX: $(yaml-quote ${CNI_TAR_PREFIX:-}) CNI_VERSION: $(yaml-quote ${CNI_VERSION:-}) CNI_SHA1: $(yaml-quote ${CNI_SHA1:-}) ENABLE_NODE_LOGGING: $(yaml-quote ${ENABLE_NODE_LOGGING:-false}) diff --git a/cluster/images/hyperkube/Makefile b/cluster/images/hyperkube/Makefile index 340f85e82e97a..b49a94bd049a6 100644 --- a/cluster/images/hyperkube/Makefile +++ b/cluster/images/hyperkube/Makefile @@ -22,7 +22,7 @@ ARCH?=amd64 OUT_DIR?=_output HYPERKUBE_BIN?=$(shell pwd)/../../../$(OUT_DIR)/dockerized/bin/linux/$(ARCH)/hyperkube -BASEIMAGE=k8s.gcr.io/debian-hyperkube-base-$(ARCH):0.12.1 +BASEIMAGE?=us.gcr.io/k8s-artifacts-prod/build-image/debian-hyperkube-base-$(ARCH):v1.0.0 TEMP_DIR:=$(shell mktemp -d -t hyperkubeXXXXXX) all: build diff --git a/pkg/kubelet/apis/podresources/server.go b/pkg/kubelet/apis/podresources/server.go index f39e2b26ce0cb..63ac1b9470c65 100644 --- a/pkg/kubelet/apis/podresources/server.go +++ b/pkg/kubelet/apis/podresources/server.go @@ -26,6 +26,7 @@ import ( // DevicesProvider knows how to provide the devices used by the given container type DevicesProvider interface { GetDevices(podUID, containerName string) []*v1alpha1.ContainerDevices + UpdateAllocatedDevices() } // PodsProvider knows how to provide the pods admitted by the node @@ -52,6 +53,7 @@ func NewPodResourcesServer(podsProvider PodsProvider, devicesProvider DevicesPro func (p *podResourcesServer) List(ctx context.Context, req *v1alpha1.ListPodResourcesRequest) (*v1alpha1.ListPodResourcesResponse, error) { pods := p.podsProvider.GetPods() podResources := make([]*v1alpha1.PodResources, len(pods)) + p.devicesProvider.UpdateAllocatedDevices() for i, pod := range pods { pRes := v1alpha1.PodResources{ diff --git a/pkg/kubelet/apis/podresources/server_test.go b/pkg/kubelet/apis/podresources/server_test.go index 60c14d4c954a5..b292667e0ab72 100644 --- a/pkg/kubelet/apis/podresources/server_test.go +++ b/pkg/kubelet/apis/podresources/server_test.go @@ -42,6 +42,10 @@ func (m *mockProvider) GetDevices(podUID, containerName string) []*v1alpha1.Cont return args.Get(0).([]*v1alpha1.ContainerDevices) } +func (m *mockProvider) UpdateAllocatedDevices() { + m.Called() +} + func TestListPodResources(t *testing.T) { podName := "pod-name" podNamespace := "pod-namespace" @@ -140,6 +144,7 @@ func TestListPodResources(t *testing.T) { m := new(mockProvider) m.On("GetPods").Return(tc.pods) m.On("GetDevices", string(podUID), containerName).Return(tc.devices) + m.On("UpdateAllocatedDevices").Return() server := NewPodResourcesServer(m, m) resp, err := server.List(context.TODO(), &v1alpha1.ListPodResourcesRequest{}) if err != nil { 
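The `podresources` hunks above extend the `DevicesProvider` interface with `UpdateAllocatedDevices()` and invoke it at the top of `List`, so that device assignments still recorded against terminated pods are released before they are reported. A minimal sketch of that ordering, using a hypothetical `fakeProvider` rather than the kubelet's real device manager:

```go
package main

import "fmt"

// devicesProvider mirrors the shape of the patched DevicesProvider interface
// (device lists simplified to plain strings).
type devicesProvider interface {
	GetDevices(podUID, containerName string) []string
	UpdateAllocatedDevices() // frees devices bound to terminated pods
}

// fakeProvider is a hypothetical stand-in for the kubelet's device manager.
type fakeProvider struct {
	allocations map[string][]string // podUID -> allocated device IDs
	active      map[string]bool     // podUID -> pod still running?
}

func (f *fakeProvider) GetDevices(podUID, _ string) []string { return f.allocations[podUID] }

// UpdateAllocatedDevices drops allocations for pods that are no longer
// active, mimicking the garbage collection the real manager performs.
func (f *fakeProvider) UpdateAllocatedDevices() {
	for uid := range f.allocations {
		if !f.active[uid] {
			delete(f.allocations, uid)
		}
	}
}

// list reconciles before reporting, matching the order the patch enforces in List.
func list(p devicesProvider, podUIDs []string) {
	p.UpdateAllocatedDevices()
	for _, uid := range podUIDs {
		fmt.Printf("%s: %v\n", uid, p.GetDevices(uid, "ctr"))
	}
}

func main() {
	p := &fakeProvider{
		allocations: map[string][]string{"pod-a": {"gpu-0"}, "pod-b": {"gpu-1"}},
		active:      map[string]bool{"pod-a": true}, // pod-b has terminated
	}
	list(p, []string{"pod-a", "pod-b"}) // pod-b now reports no devices
}
```

Without the reconcile step, `pod-b` would still report `gpu-1` after exiting, which is exactly the stale accounting this change removes.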
diff --git a/pkg/kubelet/cm/container_manager.go b/pkg/kubelet/cm/container_manager.go index 40ed8bd21ec6e..4b513ba6e8e55 100644 --- a/pkg/kubelet/cm/container_manager.go +++ b/pkg/kubelet/cm/container_manager.go @@ -112,6 +112,9 @@ type ContainerManager interface { // GetTopologyPodAdmitHandler returns an instance of the TopologyManager for Pod Admission GetTopologyPodAdmitHandler() topologymanager.Manager + + // UpdateAllocatedDevices frees any Devices that are bound to terminated pods. + UpdateAllocatedDevices() } type NodeConfig struct { diff --git a/pkg/kubelet/cm/container_manager_linux.go b/pkg/kubelet/cm/container_manager_linux.go index 1baa280768f4d..049f44b30e20e 100644 --- a/pkg/kubelet/cm/container_manager_linux.go +++ b/pkg/kubelet/cm/container_manager_linux.go @@ -918,3 +918,7 @@ func (cm *containerManagerImpl) GetDevices(podUID, containerName string) []*podr func (cm *containerManagerImpl) ShouldResetExtendedResourceCapacity() bool { return cm.deviceManager.ShouldResetExtendedResourceCapacity() } + +func (cm *containerManagerImpl) UpdateAllocatedDevices() { + cm.deviceManager.UpdateAllocatedDevices() +} diff --git a/pkg/kubelet/cm/container_manager_stub.go b/pkg/kubelet/cm/container_manager_stub.go index 7d6a214f4b74b..c21e8cce0dc88 100644 --- a/pkg/kubelet/cm/container_manager_stub.go +++ b/pkg/kubelet/cm/container_manager_stub.go @@ -121,6 +121,10 @@ func (cm *containerManagerStub) GetTopologyPodAdmitHandler() topologymanager.Man return nil } +func (cm *containerManagerStub) UpdateAllocatedDevices() { + return +} + func NewStubContainerManager() ContainerManager { return &containerManagerStub{shouldResetExtendedResourceCapacity: false} } diff --git a/pkg/kubelet/cm/container_manager_windows.go b/pkg/kubelet/cm/container_manager_windows.go index 55f172171e6dc..b4616d7405882 100644 --- a/pkg/kubelet/cm/container_manager_windows.go +++ b/pkg/kubelet/cm/container_manager_windows.go @@ -180,3 +180,7 @@ func (cm *containerManagerImpl) ShouldResetExtendedResourceCapacity() bool { func (cm *containerManagerImpl) GetTopologyPodAdmitHandler() topologymanager.Manager { return nil } + +func (cm *containerManagerImpl) UpdateAllocatedDevices() { + return +} diff --git a/pkg/kubelet/cm/devicemanager/manager.go b/pkg/kubelet/cm/devicemanager/manager.go index 053ec62161821..37a4411570df7 100644 --- a/pkg/kubelet/cm/devicemanager/manager.go +++ b/pkg/kubelet/cm/devicemanager/manager.go @@ -586,9 +586,9 @@ func (m *ManagerImpl) readCheckpoint() error { return nil } -// updateAllocatedDevices gets a list of active pods and then frees any Devices that are bound to -// terminated pods. Returns error on failure. -func (m *ManagerImpl) updateAllocatedDevices(activePods []*v1.Pod) { +// UpdateAllocatedDevices frees any Devices that are bound to terminated pods. +func (m *ManagerImpl) UpdateAllocatedDevices() { + activePods := m.activePods() if !m.sourcesReady.AllReady() { return } @@ -764,7 +764,7 @@ func (m *ManagerImpl) allocateContainerResources(pod *v1.Pod, container *v1.Cont // Updates allocatedDevices to garbage collect any stranded resources // before doing the device plugin allocation. 
if !allocatedDevicesUpdated { - m.updateAllocatedDevices(m.activePods()) + m.UpdateAllocatedDevices() allocatedDevicesUpdated = true } allocDevices, err := m.devicesToAllocate(podUID, contName, resource, needed, devicesToReuse[resource]) @@ -779,7 +779,7 @@ func (m *ManagerImpl) allocateContainerResources(pod *v1.Pod, container *v1.Cont // Manager.Allocate involves RPC calls to device plugin, which // could be heavy-weight. Therefore we want to perform this operation outside // mutex lock. Note if Allocate call fails, we may leave container resources - // partially allocated for the failed container. We rely on updateAllocatedDevices() + // partially allocated for the failed container. We rely on UpdateAllocatedDevices() // to garbage collect these resources later. Another side effect is that if // we have X resource A and Y resource B in total, and two containers, container1 // and container2 both require X resource A and Y resource B. Both allocation diff --git a/pkg/kubelet/cm/devicemanager/manager_stub.go b/pkg/kubelet/cm/devicemanager/manager_stub.go index a22e01b29fa3a..dd179d9858030 100644 --- a/pkg/kubelet/cm/devicemanager/manager_stub.go +++ b/pkg/kubelet/cm/devicemanager/manager_stub.go @@ -78,3 +78,8 @@ func (h *ManagerStub) GetDevices(_, _ string) []*podresourcesapi.ContainerDevice func (h *ManagerStub) ShouldResetExtendedResourceCapacity() bool { return false } + +// UpdateAllocatedDevices is a no-op for the stub manager +func (h *ManagerStub) UpdateAllocatedDevices() { + return +} diff --git a/pkg/kubelet/cm/devicemanager/topology_hints.go b/pkg/kubelet/cm/devicemanager/topology_hints.go index 38ae0e166f7aa..7c5000787abee 100644 --- a/pkg/kubelet/cm/devicemanager/topology_hints.go +++ b/pkg/kubelet/cm/devicemanager/topology_hints.go @@ -28,6 +28,10 @@ import ( // ensures the Device Manager is consulted when Topology Aware Hints for each // container are created. func (m *ManagerImpl) GetTopologyHints(pod v1.Pod, container v1.Container) map[string][]topologymanager.TopologyHint { + // Garbage collect any stranded device resources before providing TopologyHints + m.UpdateAllocatedDevices() + + // Loop through all device resources and generate TopologyHints for them. deviceHints := make(map[string][]topologymanager.TopologyHint) for resourceObj, requestedObj := range container.Resources.Limits { @@ -67,7 +71,7 @@ func (m *ManagerImpl) deviceHasTopologyAlignment(resource string) bool { func (m *ManagerImpl) getAvailableDevices(resource string) sets.String { // Gets Devices in use. - m.updateAllocatedDevices(m.activePods()) + m.UpdateAllocatedDevices() // Strip all devices in use from the list of healthy ones. return m.healthyDevices[resource].Difference(m.allocatedDevices[resource]) } diff --git a/pkg/kubelet/cm/devicemanager/types.go b/pkg/kubelet/cm/devicemanager/types.go index 4d7c9b8af9eaf..c1f645fcbccc8 100644 --- a/pkg/kubelet/cm/devicemanager/types.go +++ b/pkg/kubelet/cm/devicemanager/types.go @@ -68,6 +68,9 @@ type Manager interface { // TopologyManager HintProvider provider indicates the Device Manager implements the Topology Manager Interface // and is consulted to make Topology aware resource alignments GetTopologyHints(pod v1.Pod, container v1.Container) map[string][]topologymanager.TopologyHint + + // UpdateAllocatedDevices frees any Devices that are bound to terminated pods. + UpdateAllocatedDevices() } // DeviceRunContainerOptions contains the combined container runtime settings to consume its allocated devices.
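In `getAvailableDevices` above, availability is the healthy set minus the in-use set, computed only after `UpdateAllocatedDevices()` has released devices held by terminated pods. A small sketch of that set arithmetic with the real `k8s.io/apimachinery/pkg/util/sets` package (the surrounding manager state is simplified and stands in for the actual `ManagerImpl` fields):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// Simplified stand-ins for m.healthyDevices[resource] and
	// m.allocatedDevices[resource] after garbage collection has run.
	healthy := sets.NewString("gpu-0", "gpu-1", "gpu-2")
	allocated := sets.NewString("gpu-1") // still bound to a running pod

	// Same expression shape as the patched getAvailableDevices:
	// healthyDevices.Difference(allocatedDevices)
	available := healthy.Difference(allocated)
	fmt.Println(available.List()) // [gpu-0 gpu-2]
}
```

If a stale allocation for a terminated pod were still present, it would wrongly shrink this set; that is why both `GetTopologyHints` and `getAvailableDevices` now garbage-collect before computing it.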
diff --git a/pkg/kubelet/kubelet_network_linux.go b/pkg/kubelet/kubelet_network_linux.go index 1c9ad46b9890f..d18ab75a05369 100644 --- a/pkg/kubelet/kubelet_network_linux.go +++ b/pkg/kubelet/kubelet_network_linux.go @@ -68,6 +68,22 @@ func (kl *Kubelet) syncNetworkUtil() { klog.Errorf("Failed to ensure rule to drop packet marked by %v in %v chain %v: %v", KubeMarkDropChain, utiliptables.TableFilter, KubeFirewallChain, err) return } + + // drop all non-local packets to localhost if they're not part of an existing + // forwarded connection. See #90259 + if !kl.iptClient.IsIpv6() { // ipv6 doesn't have this issue + if _, err := kl.iptClient.EnsureRule(utiliptables.Append, utiliptables.TableFilter, KubeFirewallChain, + "-m", "comment", "--comment", "block incoming localnet connections", + "--dst", "127.0.0.0/8", + "!", "--src", "127.0.0.0/8", + "-m", "conntrack", + "!", "--ctstate", "RELATED,ESTABLISHED,DNAT", + "-j", "DROP"); err != nil { + klog.Errorf("Failed to ensure rule to drop invalid localhost packets in %v chain %v: %v", utiliptables.TableFilter, KubeFirewallChain, err) + return + } + } + if _, err := kl.iptClient.EnsureRule(utiliptables.Prepend, utiliptables.TableFilter, utiliptables.ChainOutput, "-j", string(KubeFirewallChain)); err != nil { klog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", utiliptables.TableFilter, utiliptables.ChainOutput, KubeFirewallChain, err) return diff --git a/pkg/kubelet/volumemanager/reconciler/reconciler.go b/pkg/kubelet/volumemanager/reconciler/reconciler.go index 1e3c48eb34669..82e5fac58434d 100644 --- a/pkg/kubelet/volumemanager/reconciler/reconciler.go +++ b/pkg/kubelet/volumemanager/reconciler/reconciler.go @@ -24,6 +24,7 @@ import ( "io/ioutil" "os" "path" + "path/filepath" "time" v1 "k8s.io/api/core/v1" @@ -518,7 +519,8 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume, pod.UID, newMapperErr) } - checkPath, _ = volumeMapper.GetPodDeviceMapPath() + mapDir, linkName := volumeMapper.GetPodDeviceMapPath() + checkPath = filepath.Join(mapDir, linkName) } else { var err error volumeMounter, err = plugin.NewMounter( diff --git a/pkg/registry/rbac/BUILD b/pkg/registry/rbac/BUILD index e44ab456f1b73..c111ebfff6766 100644 --- a/pkg/registry/rbac/BUILD +++ b/pkg/registry/rbac/BUILD @@ -57,5 +57,7 @@ go_test( "//pkg/apis/core/helper:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/github.com/google/gofuzz:go_default_library", ], ) diff --git a/pkg/registry/rbac/helpers.go b/pkg/registry/rbac/helpers.go index 0e10a65b855cf..76f7e7eee8b0d 100644 --- a/pkg/registry/rbac/helpers.go +++ b/pkg/registry/rbac/helpers.go @@ -44,6 +44,7 @@ func IsOnlyMutatingGCFields(obj, old runtime.Object, equalities conversion.Equal copiedMeta.SetOwnerReferences(oldMeta.GetOwnerReferences()) copiedMeta.SetFinalizers(oldMeta.GetFinalizers()) copiedMeta.SetSelfLink(oldMeta.GetSelfLink()) + copiedMeta.SetManagedFields(oldMeta.GetManagedFields()) return equalities.DeepEqual(copied, old) } diff --git a/pkg/registry/rbac/helpers_test.go b/pkg/registry/rbac/helpers_test.go index 755b82a026d3d..7a9cca5978827 100644 --- a/pkg/registry/rbac/helpers_test.go +++ b/pkg/registry/rbac/helpers_test.go @@ -17,12 +17,16 @@ limitations under the License. 
package rbac import ( + "reflect" "testing" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" kapi "k8s.io/kubernetes/pkg/apis/core" kapihelper "k8s.io/kubernetes/pkg/apis/core/helper" + + fuzz "github.com/google/gofuzz" ) func newPod() *kapi.Pod { @@ -53,6 +57,22 @@ func TestIsOnlyMutatingGCFields(t *testing.T) { }, expected: true, }, + { + name: "different managedFields", + obj: func() runtime.Object { + return newPod() + }, + old: func() runtime.Object { + obj := newPod() + obj.ManagedFields = []metav1.ManagedFieldsEntry{ + { + Manager: "manager", + }, + } + return obj + }, + expected: true, + }, { name: "only annotations", obj: func() runtime.Object { @@ -150,3 +170,33 @@ func TestIsOnlyMutatingGCFields(t *testing.T) { } } } + +func TestNewMetadataFields(t *testing.T) { + f := fuzz.New().NilChance(0.0).NumElements(1, 1) + for i := 0; i < 100; i++ { + objMeta := metav1.ObjectMeta{} + f.Fuzz(&objMeta) + objMeta.Name = "" + objMeta.GenerateName = "" + objMeta.Namespace = "" + objMeta.SelfLink = "" + objMeta.UID = types.UID("") + objMeta.ResourceVersion = "" + objMeta.Generation = 0 + objMeta.CreationTimestamp = metav1.Time{} + objMeta.DeletionTimestamp = nil + objMeta.DeletionGracePeriodSeconds = nil + objMeta.Labels = nil + objMeta.Annotations = nil + objMeta.OwnerReferences = nil + objMeta.Finalizers = nil + objMeta.ClusterName = "" + objMeta.ManagedFields = nil + + if !reflect.DeepEqual(metav1.ObjectMeta{}, objMeta) { + t.Fatalf(`A new field was introduced in ObjectMeta, add the field to +IsOnlyMutatingGCFields if necessary, and update this test: +%#v`, objMeta) + } + } +} diff --git a/pkg/scheduler/eventhandlers.go b/pkg/scheduler/eventhandlers.go index 53462e221baf9..961d49ddc6cf4 100644 --- a/pkg/scheduler/eventhandlers.go +++ b/pkg/scheduler/eventhandlers.go @@ -332,8 +332,8 @@ func responsibleForPod(pod *v1.Pod, schedulerName string) bool { // skipPodUpdate checks whether the specified pod update should be ignored. // This function will return true if // - The pod has already been assumed, AND -// - The pod has only its ResourceVersion, Spec.NodeName and/or Annotations -// updated. +// - The pod has only its ResourceVersion, Spec.NodeName, Annotations, +// ManagedFields, Finalizers and/or Conditions updated. func (sched *Scheduler) skipPodUpdate(pod *v1.Pod) bool { // Non-assumed pods should never be skipped. isAssumed, err := sched.SchedulerCache.IsAssumedPod(pod) @@ -366,6 +366,10 @@ func (sched *Scheduler) skipPodUpdate(pod *v1.Pod) bool { // Annotations must be excluded for the reasons described in // https://github.com/kubernetes/kubernetes/issues/52914. p.Annotations = nil + // The following might be changed by external controllers, but they don't + // affect scheduling decisions. 
+ p.Finalizers = nil + p.Status.Conditions = nil return p } assumedPodCopy, podCopy := f(assumedPod), f(pod) diff --git a/pkg/scheduler/eventhandlers_test.go b/pkg/scheduler/eventhandlers_test.go index 8d4de88f7c88a..427e923980ba6 100644 --- a/pkg/scheduler/eventhandlers_test.go +++ b/pkg/scheduler/eventhandlers_test.go @@ -103,6 +103,51 @@ func TestSkipPodUpdate(t *testing.T) { }, expected: false, }, + { + name: "with changes on Finalizers", + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-0", + Finalizers: []string{"a", "b"}, + }, + }, + isAssumedPodFunc: func(*v1.Pod) bool { + return true + }, + getPodFunc: func(*v1.Pod) *v1.Pod { + return &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-0", + Finalizers: []string{"c", "d"}, + }, + } + }, + expected: true, + }, + { + name: "with changes on Conditions", + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-0", + }, + Status: v1.PodStatus{ + Conditions: []v1.PodCondition{ + {Type: "foo"}, + }, + }, + }, + isAssumedPodFunc: func(*v1.Pod) bool { + return true + }, + getPodFunc: func(*v1.Pod) *v1.Pod { + return &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-0", + }, + } + }, + expected: true, + }, } for _, test := range table { t.Run(test.name, func(t *testing.T) { diff --git a/pkg/volume/awsebs/aws_ebs_block.go b/pkg/volume/awsebs/aws_ebs_block.go index a991474a91fc8..9511393c5a510 100644 --- a/pkg/volume/awsebs/aws_ebs_block.go +++ b/pkg/volume/awsebs/aws_ebs_block.go @@ -25,6 +25,7 @@ import ( "strings" v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" @@ -51,10 +52,10 @@ func (plugin *awsElasticBlockStorePlugin) ConstructBlockVolumeSpec(podUID types. return nil, fmt.Errorf("failed to get volume plugin information from globalMapPathUUID: %v", globalMapPathUUID) } - return getVolumeSpecFromGlobalMapPath(globalMapPath) + return getVolumeSpecFromGlobalMapPath(volumeName, globalMapPath) } -func getVolumeSpecFromGlobalMapPath(globalMapPath string) (*volume.Spec, error) { +func getVolumeSpecFromGlobalMapPath(volumeName string, globalMapPath string) (*volume.Spec, error) { // Get volume spec information from globalMapPath // globalMapPath example: // plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumeID} @@ -68,6 +69,9 @@ func getVolumeSpecFromGlobalMapPath(globalMapPath string) (*volume.Spec, error) } block := v1.PersistentVolumeBlock awsVolume := &v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: volumeName, + }, Spec: v1.PersistentVolumeSpec{ PersistentVolumeSource: v1.PersistentVolumeSource{ AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{ diff --git a/pkg/volume/awsebs/aws_ebs_block_test.go b/pkg/volume/awsebs/aws_ebs_block_test.go index 05ed2fa7ea667..f2a49456b7805 100644 --- a/pkg/volume/awsebs/aws_ebs_block_test.go +++ b/pkg/volume/awsebs/aws_ebs_block_test.go @@ -52,16 +52,19 @@ func TestGetVolumeSpecFromGlobalMapPath(t *testing.T) { expectedGlobalPath := filepath.Join(tmpVDir, testGlobalPath) //Bad Path - badspec, err := getVolumeSpecFromGlobalMapPath("") + badspec, err := getVolumeSpecFromGlobalMapPath("", "") if badspec != nil || err == nil { t.Fatalf("Expected not to get spec from GlobalMapPath but did") } // Good Path - spec, err := getVolumeSpecFromGlobalMapPath(expectedGlobalPath) + spec, err := getVolumeSpecFromGlobalMapPath("myVolume", expectedGlobalPath) if spec == nil || err != nil { t.Fatalf("Failed to get spec from GlobalMapPath: %v", 
err) } + if spec.PersistentVolume.Name != "myVolume" { + t.Errorf("Invalid PV name from GlobalMapPath spec: %s", spec.PersistentVolume.Name) + } if spec.PersistentVolume.Spec.AWSElasticBlockStore.VolumeID != testVolName { t.Errorf("Invalid volumeID from GlobalMapPath spec: %s", spec.PersistentVolume.Spec.AWSElasticBlockStore.VolumeID) } diff --git a/pkg/volume/cinder/cinder_block.go b/pkg/volume/cinder/cinder_block.go index c03310aa7a451..483170ef28490 100644 --- a/pkg/volume/cinder/cinder_block.go +++ b/pkg/volume/cinder/cinder_block.go @@ -22,7 +22,8 @@ import ( "fmt" "path/filepath" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" @@ -53,10 +54,10 @@ func (plugin *cinderPlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeNam return nil, fmt.Errorf("failed to get volume plugin information from globalMapPathUUID: %v", globalMapPathUUID) } - return getVolumeSpecFromGlobalMapPath(globalMapPath) + return getVolumeSpecFromGlobalMapPath(volumeName, globalMapPath) } -func getVolumeSpecFromGlobalMapPath(globalMapPath string) (*volume.Spec, error) { +func getVolumeSpecFromGlobalMapPath(volumeName, globalMapPath string) (*volume.Spec, error) { // Get volume spec information from globalMapPath // globalMapPath example: // plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumeID} @@ -67,6 +68,9 @@ func getVolumeSpecFromGlobalMapPath(globalMapPath string) (*volume.Spec, error) } block := v1.PersistentVolumeBlock cinderVolume := &v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: volumeName, + }, Spec: v1.PersistentVolumeSpec{ PersistentVolumeSource: v1.PersistentVolumeSource{ Cinder: &v1.CinderPersistentVolumeSource{ diff --git a/pkg/volume/cinder/cinder_block_test.go b/pkg/volume/cinder/cinder_block_test.go index 3f9dbe6b99965..a28d64c0d1270 100644 --- a/pkg/volume/cinder/cinder_block_test.go +++ b/pkg/volume/cinder/cinder_block_test.go @@ -23,7 +23,7 @@ import ( "path/filepath" "testing" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" utiltesting "k8s.io/client-go/util/testing" @@ -52,16 +52,19 @@ func TestGetVolumeSpecFromGlobalMapPath(t *testing.T) { expectedGlobalPath := filepath.Join(tmpVDir, testGlobalPath) //Bad Path - badspec, err := getVolumeSpecFromGlobalMapPath("") + badspec, err := getVolumeSpecFromGlobalMapPath("", "") if badspec != nil || err == nil { t.Errorf("Expected not to get spec from GlobalMapPath but did") } // Good Path - spec, err := getVolumeSpecFromGlobalMapPath(expectedGlobalPath) + spec, err := getVolumeSpecFromGlobalMapPath("myVolume", expectedGlobalPath) if spec == nil || err != nil { t.Fatalf("Failed to get spec from GlobalMapPath: %v", err) } + if spec.PersistentVolume.Name != "myVolume" { + t.Errorf("Invalid PV name from GlobalMapPath spec: %s", spec.PersistentVolume.Name) + } if spec.PersistentVolume.Spec.Cinder.VolumeID != testVolName { t.Errorf("Invalid volumeID from GlobalMapPath spec: %s", spec.PersistentVolume.Spec.Cinder.VolumeID) } diff --git a/pkg/volume/gcepd/gce_pd_block.go b/pkg/volume/gcepd/gce_pd_block.go index 671f5d7178d89..59f4e1c821b94 100644 --- a/pkg/volume/gcepd/gce_pd_block.go +++ b/pkg/volume/gcepd/gce_pd_block.go @@ -24,6 +24,7 @@ import ( "strconv" v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" 
@@ -54,10 +55,10 @@ func (plugin *gcePersistentDiskPlugin) ConstructBlockVolumeSpec(podUID types.UID return nil, fmt.Errorf("failed to get volume plugin information from globalMapPathUUID: %v", globalMapPathUUID) } - return getVolumeSpecFromGlobalMapPath(globalMapPath) + return getVolumeSpecFromGlobalMapPath(volumeName, globalMapPath) } -func getVolumeSpecFromGlobalMapPath(globalMapPath string) (*volume.Spec, error) { +func getVolumeSpecFromGlobalMapPath(volumeName, globalMapPath string) (*volume.Spec, error) { // Get volume spec information from globalMapPath // globalMapPath example: // plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumeID} @@ -68,6 +69,9 @@ func getVolumeSpecFromGlobalMapPath(globalMapPath string) (*volume.Spec, error) } block := v1.PersistentVolumeBlock gceVolume := &v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: volumeName, + }, Spec: v1.PersistentVolumeSpec{ PersistentVolumeSource: v1.PersistentVolumeSource{ GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ diff --git a/pkg/volume/gcepd/gce_pd_block_test.go b/pkg/volume/gcepd/gce_pd_block_test.go index 6f992fbe0751c..6338a8a4156a6 100644 --- a/pkg/volume/gcepd/gce_pd_block_test.go +++ b/pkg/volume/gcepd/gce_pd_block_test.go @@ -52,16 +52,19 @@ func TestGetVolumeSpecFromGlobalMapPath(t *testing.T) { expectedGlobalPath := filepath.Join(tmpVDir, testGlobalPath) //Bad Path - badspec, err := getVolumeSpecFromGlobalMapPath("") + badspec, err := getVolumeSpecFromGlobalMapPath("", "") if badspec != nil || err == nil { t.Errorf("Expected not to get spec from GlobalMapPath but did") } // Good Path - spec, err := getVolumeSpecFromGlobalMapPath(expectedGlobalPath) + spec, err := getVolumeSpecFromGlobalMapPath("myVolume", expectedGlobalPath) if spec == nil || err != nil { t.Fatalf("Failed to get spec from GlobalMapPath: %v", err) } + if spec.PersistentVolume.Name != "myVolume" { + t.Errorf("Invalid PV name from GlobalMapPath spec: %s", spec.PersistentVolume.Name) + } if spec.PersistentVolume.Spec.GCEPersistentDisk.PDName != testPdName { t.Errorf("Invalid pdName from GlobalMapPath spec: %s", spec.PersistentVolume.Spec.GCEPersistentDisk.PDName) } diff --git a/pkg/volume/vsphere_volume/vsphere_volume_block.go b/pkg/volume/vsphere_volume/vsphere_volume_block.go index b660cd3cd4d58..8f1ffd3b3cff0 100644 --- a/pkg/volume/vsphere_volume/vsphere_volume_block.go +++ b/pkg/volume/vsphere_volume/vsphere_volume_block.go @@ -23,7 +23,8 @@ import ( "path/filepath" "strings" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" @@ -49,10 +50,10 @@ func (plugin *vsphereVolumePlugin) ConstructBlockVolumeSpec(podUID types.UID, vo if len(globalMapPath) <= 1 { return nil, fmt.Errorf("failed to get volume plugin information from globalMapPathUUID: %v", globalMapPathUUID) } - return getVolumeSpecFromGlobalMapPath(globalMapPath) + return getVolumeSpecFromGlobalMapPath(volumeName, globalMapPath) } -func getVolumeSpecFromGlobalMapPath(globalMapPath string) (*volume.Spec, error) { +func getVolumeSpecFromGlobalMapPath(volumeName, globalMapPath string) (*volume.Spec, error) { // Construct volume spec from globalMapPath // globalMapPath example: // plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumeID} @@ -64,6 +65,9 @@ func getVolumeSpecFromGlobalMapPath(globalMapPath string) (*volume.Spec, error) } block := v1.PersistentVolumeBlock vsphereVolume := 
&v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: volumeName, + }, Spec: v1.PersistentVolumeSpec{ PersistentVolumeSource: v1.PersistentVolumeSource{ VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{ diff --git a/pkg/volume/vsphere_volume/vsphere_volume_block_test.go b/pkg/volume/vsphere_volume/vsphere_volume_block_test.go index 941202e84a803..3377b4c8b43eb 100644 --- a/pkg/volume/vsphere_volume/vsphere_volume_block_test.go +++ b/pkg/volume/vsphere_volume/vsphere_volume_block_test.go @@ -23,7 +23,7 @@ import ( "path/filepath" "testing" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" utiltesting "k8s.io/client-go/util/testing" @@ -51,16 +51,19 @@ func TestGetVolumeSpecFromGlobalMapPath(t *testing.T) { expectedGlobalPath := filepath.Join(tmpVDir, testGlobalPath) // Bad Path - badspec, err := getVolumeSpecFromGlobalMapPath("") + badspec, err := getVolumeSpecFromGlobalMapPath("", "") if badspec != nil || err == nil { t.Errorf("Expected not to get spec from GlobalMapPath but did") } // Good Path - spec, err := getVolumeSpecFromGlobalMapPath(expectedGlobalPath) + spec, err := getVolumeSpecFromGlobalMapPath("myVolume", expectedGlobalPath) if spec == nil || err != nil { t.Fatalf("Failed to get spec from GlobalMapPath: %s", err) } + if spec.PersistentVolume.Name != "myVolume" { + t.Errorf("Invalid PV name from GlobalMapPath spec: %s", spec.PersistentVolume.Name) + } if spec.PersistentVolume.Spec.VsphereVolume.VolumePath != testVolumePath { t.Fatalf("Invalid volumePath from GlobalMapPath spec: %s", spec.PersistentVolume.Spec.VsphereVolume.VolumePath) } diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go index 2743793dde2c4..fcd491f4c07ad 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go @@ -178,7 +178,7 @@ func ValidateManagedFields(fieldsList []metav1.ManagedFieldsEntry, fldPath *fiel default: allErrs = append(allErrs, field.Invalid(fldPath.Child("operation"), fields.Operation, "must be `Apply` or `Update`")) } - if fields.FieldsType != "FieldsV1" { + if len(fields.FieldsType) > 0 && fields.FieldsType != "FieldsV1" { allErrs = append(allErrs, field.Invalid(fldPath.Child("fieldsType"), fields.FieldsType, "must be `FieldsV1`")) } } diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation_test.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation_test.go index 30d6289f8ee60..aa71a600ae21d 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation_test.go @@ -242,12 +242,8 @@ func TestValidateFieldManagerInvalid(t *testing.T) { } } -func TestValidateMangedFieldsInvalid(t *testing.T) { +func TestValidateManagedFieldsInvalid(t *testing.T) { tests := []metav1.ManagedFieldsEntry{ - { - Operation: metav1.ManagedFieldsOperationUpdate, - // FieldsType is missing - }, { Operation: metav1.ManagedFieldsOperationUpdate, FieldsType: "RandomVersion", @@ -274,6 +270,10 @@ func TestValidateMangedFieldsInvalid(t *testing.T) { func TestValidateMangedFieldsValid(t *testing.T) { tests := []metav1.ManagedFieldsEntry{ + { + Operation: metav1.ManagedFieldsOperationUpdate, + // FieldsType is missing + }, { Operation: metav1.ManagedFieldsOperationUpdate, 
FieldsType: "FieldsV1", diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/create.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/create.go index 6dc143a7b06cb..16ea26b413dcf 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/create.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/create.go @@ -153,11 +153,7 @@ func createHandler(r rest.NamedCreater, scope *RequestScope, admit admission.Int return } - obj, err = scope.FieldManager.Update(liveObj, obj, managerOrUserAgent(options.FieldManager, req.UserAgent())) - if err != nil { - scope.err(fmt.Errorf("failed to update object (Create for %v) managed fields: %v", scope.Kind, err), w, req) - return - } + obj = scope.FieldManager.UpdateNoErrors(liveObj, obj, managerOrUserAgent(options.FieldManager, req.UserAgent())) } trace.Step("About to store object in database") diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/fieldmanager.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/fieldmanager.go index 1eaf6351da73b..e671e87f676ab 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/fieldmanager.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/fieldmanager.go @@ -18,6 +18,7 @@ package fieldmanager import ( "fmt" + "reflect" "time" "k8s.io/apimachinery/pkg/api/errors" @@ -34,6 +35,8 @@ import ( "sigs.k8s.io/yaml" ) +var atMostEverySecond = internal.NewAtMostEvery(time.Second) + // FieldManager updates the managed fields and merge applied // configurations. type FieldManager struct { @@ -94,25 +97,31 @@ func NewCRDFieldManager(models openapiproto.Models, objectConverter runtime.Obje func (f *FieldManager) Update(liveObj, newObj runtime.Object, manager string) (runtime.Object, error) { // If the object doesn't have metadata, we should just return without trying to // set the managedFields at all, so creates/updates/patches will work normally. - if _, err := meta.Accessor(newObj); err != nil { + newAccessor, err := meta.Accessor(newObj) + if err != nil { return newObj, nil } // First try to decode the managed fields provided in the update, // This is necessary to allow directly updating managed fields. - managed, err := internal.DecodeObjectManagedFields(newObj) - - // If the managed field is empty or we failed to decode it, - // let's try the live object. This is to prevent clients who - // don't understand managedFields from deleting it accidentally. - if err != nil || len(managed.Fields) == 0 { - managed, err = internal.DecodeObjectManagedFields(liveObj) + var managed internal.Managed + if isResetManagedFields(newAccessor.GetManagedFields()) { + managed = internal.NewEmptyManaged() + } else if managed, err = internal.DecodeObjectManagedFields(newAccessor.GetManagedFields()); err != nil || len(managed.Fields) == 0 { + liveAccessor, err := meta.Accessor(liveObj) if err != nil { - return nil, fmt.Errorf("failed to decode managed fields: %v", err) + return newObj, nil + } + // If the managed field is empty or we failed to decode it, + // let's try the live object. This is to prevent clients who + // don't understand managedFields from deleting it accidentally. 
+ if managed, err = internal.DecodeObjectManagedFields(liveAccessor.GetManagedFields()); err != nil { + managed = internal.NewEmptyManaged() } } // if managed field is still empty, skip updating managed fields altogether if len(managed.Fields) == 0 { + newAccessor.SetManagedFields(nil) return newObj, nil } newObjVersioned, err := f.toVersioned(newObj) @@ -172,6 +181,40 @@ func (f *FieldManager) Update(liveObj, newObj runtime.Object, manager string) (r return newObj, nil } +// UpdateNoErrors is the same as Update, but it will not return +// errors. If an error happens, the object is returned with +// managedFields cleared. +func (f *FieldManager) UpdateNoErrors(liveObj, newObj runtime.Object, manager string) runtime.Object { + obj, err := f.Update(liveObj, newObj, manager) + if err != nil { + atMostEverySecond.Do(func() { + klog.Errorf("[SHOULD NOT HAPPEN] failed to update managedFields for %v: %v", + newObj.GetObjectKind().GroupVersionKind(), + err) + }) + // Explicitly remove managedFields on failure, so that + // we can't have garbage in it. + internal.RemoveObjectManagedFields(newObj) + return newObj + } + return obj +} + +// Returns true if the managedFields indicate that the user is trying to +// reset the managedFields, i.e. if the list is non-nil but empty, or if +// the list has one empty item. +func isResetManagedFields(managedFields []metav1.ManagedFieldsEntry) bool { + if len(managedFields) == 0 { + return managedFields != nil + } + + if len(managedFields) == 1 { + return reflect.DeepEqual(managedFields[0], metav1.ManagedFieldsEntry{}) + } + + return false +} + // Apply is used when server-side apply is called, as it merges the // object and update the managed fields. func (f *FieldManager) Apply(liveObj runtime.Object, patch []byte, fieldManager string, force bool) (runtime.Object, error) { @@ -182,7 +225,8 @@ func (f *FieldManager) Apply(liveObj runtime.Object, patch []byte, fieldManager } missingManagedFields := (len(accessor.GetManagedFields()) == 0) - managed, err := internal.DecodeObjectManagedFields(liveObj) + // Decode the managed fields in the live object, since it isn't allowed in the patch. 
+ managed, err := internal.DecodeObjectManagedFields(accessor.GetManagedFields()) if err != nil { return nil, fmt.Errorf("failed to decode managed fields: %v", err) } diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/fieldmanager_test.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/fieldmanager_test.go index 7b747dcb084fb..6eddca6e97480 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/fieldmanager_test.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/fieldmanager_test.go @@ -124,6 +124,7 @@ func TestApplyStripsFields(t *testing.T) { Manager: "update", Operation: metav1.ManagedFieldsOperationApply, APIVersion: "apps/v1", + FieldsType: "FieldsV1", }, }, ResourceVersion: "b", @@ -181,9 +182,7 @@ func TestVersionCheck(t *testing.T) { func TestApplyDoesNotStripLabels(t *testing.T) { f := NewTestFieldManager() - obj := &corev1.Pod{} - obj.ObjectMeta.ManagedFields = []metav1.ManagedFieldsEntry{{}} - + obj := &unstructured.Unstructured{} newObj, err := f.Apply(obj, []byte(`{ "apiVersion": "apps/v1", "kind": "Pod", @@ -507,3 +506,102 @@ func TestApplySuccessWithNoManagedFields(t *testing.T) { t.Fatalf("failed to apply object: %v", err) } } + +// Tests that one can reset the managedFields by sending either an empty +// list +func TestResetManagedFieldsEmptyList(t *testing.T) { + f := NewTestFieldManager() + + objBytes := []byte(`{ + "apiVersion": "apps/v1", + "kind": "Pod", + "metadata": { + "labels": { + "a": "b" + }, + } + }`) + obj := &unstructured.Unstructured{Object: map[string]interface{}{}} + if err := yaml.Unmarshal(objBytes, &obj.Object); err != nil { + t.Fatalf("error decoding YAML: %v", err) + } + liveObj, err := f.Apply(&corev1.Pod{}, objBytes, "fieldmanager_test_apply", false) + if err != nil { + t.Fatalf("failed to apply object: %v", err) + } + + if err := yaml.Unmarshal([]byte(`{ + "apiVersion": "apps/v1", + "kind": "Pod", + "metadata": { + "managedFields": [], + "labels": { + "a": "b" + }, + } + }`), &obj.Object); err != nil { + t.Fatalf("error decoding YAML: %v", err) + } + liveObj, err = f.Update(liveObj, obj, "update_manager") + if err != nil { + t.Fatalf("failed to update with empty manager: %v", err) + } + + accessor, err := meta.Accessor(liveObj) + if err != nil { + t.Fatalf("couldn't get accessor: %v", err) + } + + if m := accessor.GetManagedFields(); len(m) != 0 { + t.Fatalf("failed to reset managedFields: %v", m) + } +} + +// Tests that one can reset the managedFields by sending either a list with one empty item. 
+func TestResetManagedFieldsEmptyItem(t *testing.T) { + f := NewTestFieldManager() + + objBytes := []byte(`{ + "apiVersion": "apps/v1", + "kind": "Pod", + "metadata": { + "labels": { + "a": "b" + }, + } + }`) + obj := &unstructured.Unstructured{Object: map[string]interface{}{}} + if err := yaml.Unmarshal(objBytes, &obj.Object); err != nil { + t.Fatalf("error decoding YAML: %v", err) + } + liveObj, err := f.Apply(&corev1.Pod{}, objBytes, "fieldmanager_test_apply", false) + if err != nil { + t.Fatalf("failed to apply object: %v", err) + } + + if err := yaml.Unmarshal([]byte(`{ + "apiVersion": "apps/v1", + "kind": "Pod", + "metadata": { + "managedFields": [{}], + "labels": { + "a": "b" + }, + } + }`), &obj.Object); err != nil { + t.Fatalf("error decoding YAML: %v", err) + } + liveObj, err = f.Update(liveObj, obj, "update_manager") + if err != nil { + t.Fatalf("failed to update with empty manager: %v", err) + } + + accessor, err := meta.Accessor(liveObj) + if err != nil { + t.Fatalf("couldn't get accessor: %v", err) + } + + if m := accessor.GetManagedFields(); len(m) != 0 { + t.Fatalf("failed to reset managedFields: %v", m) + } +} diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/BUILD b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/BUILD index 6b57a3484b237..1db65e78efd39 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/BUILD @@ -3,6 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", srcs = [ + "atmostevery.go", "conflict.go", "fields.go", "gvkparser.go", @@ -34,6 +35,7 @@ go_library( go_test( name = "go_default_test", srcs = [ + "atmostevery_test.go", "conflict_test.go", "fields_test.go", "managedfields_test.go", diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/atmostevery.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/atmostevery.go new file mode 100644 index 0000000000000..b75ef7416e7b2 --- /dev/null +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/atmostevery.go @@ -0,0 +1,60 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "sync" + "time" +) + +// AtMostEvery will never run the method more than once every specified +// duration. +type AtMostEvery struct { + delay time.Duration + lastCall time.Time + mutex sync.Mutex +} + +// NewAtMostEvery creates a new AtMostEvery, that will run the method at +// most every given duration. +func NewAtMostEvery(delay time.Duration) *AtMostEvery { + return &AtMostEvery{ + delay: delay, + } +} + +// updateLastCall returns true if the lastCall time has been updated, +// false if it was too early. 
+func (s *AtMostEvery) updateLastCall() bool { + s.mutex.Lock() + defer s.mutex.Unlock() + if time.Since(s.lastCall) < s.delay { + return false + } + s.lastCall = time.Now() + return true +} + +// Do will run the method if enough time has passed, and return true. +// Otherwise, it does nothing and returns false. +func (s *AtMostEvery) Do(fn func()) bool { + if !s.updateLastCall() { + return false + } + fn() + return true +} diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/atmostevery_test.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/atmostevery_test.go new file mode 100644 index 0000000000000..46fe4f7e86767 --- /dev/null +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/atmostevery_test.go @@ -0,0 +1,51 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal_test + +import ( + "testing" + "time" + + "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal" +) + +func TestAtMostEvery(t *testing.T) { + duration := time.Second + delay := 179 * time.Millisecond + atMostEvery := internal.NewAtMostEvery(delay) + count := 0 + exit := time.NewTicker(duration) + tick := time.NewTicker(2 * time.Millisecond) + defer exit.Stop() + defer tick.Stop() + + done := false + for !done { + select { + case <-exit.C: + done = true + case <-tick.C: + atMostEvery.Do(func() { + count++ + }) + } + } + + if expected := int(duration/delay) + 1; count != expected { + t.Fatalf("Function called %d times, should have been called exactly %d times", count, expected) + } +} diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/managedfields.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/managedfields.go index 1f0eb6d3a441a..8a8f4aac41069 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/managedfields.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/managedfields.go @@ -34,6 +34,14 @@ type Managed struct { Times map[string]*metav1.Time } +// NewEmptyManaged creates an empty Managed. +func NewEmptyManaged() Managed { + return Managed{ + fieldpath.ManagedFields{}, + map[string]*metav1.Time{}, + } +} + // RemoveObjectManagedFields removes the ManagedFields from the object // before we merge so that it doesn't appear in the ManagedFields // recursively. @@ -46,16 +54,8 @@ func RemoveObjectManagedFields(obj runtime.Object) { } // DecodeObjectManagedFields extracts and converts the object's ManagedFields into a fieldpath.ManagedFields.
-func DecodeObjectManagedFields(from runtime.Object) (Managed, error) { - if from == nil { - return Managed{}, nil - } - accessor, err := meta.Accessor(from) - if err != nil { - panic(fmt.Sprintf("couldn't get accessor: %v", err)) - } - - managed, err := decodeManagedFields(accessor.GetManagedFields()) +func DecodeObjectManagedFields(from []metav1.ManagedFieldsEntry) (Managed, error) { + managed, err := decodeManagedFields(from) if err != nil { return Managed{}, fmt.Errorf("failed to convert managed fields from API: %v", err) } @@ -83,7 +83,16 @@ func EncodeObjectManagedFields(obj runtime.Object, managed Managed) error { func decodeManagedFields(encodedManagedFields []metav1.ManagedFieldsEntry) (managed Managed, err error) { managed.Fields = make(fieldpath.ManagedFields, len(encodedManagedFields)) managed.Times = make(map[string]*metav1.Time, len(encodedManagedFields)) - for _, encodedVersionedSet := range encodedManagedFields { + + for i, encodedVersionedSet := range encodedManagedFields { + switch encodedVersionedSet.FieldsType { + case "FieldsV1": + // Valid case. + case "": + return Managed{}, fmt.Errorf("missing fieldsType in managed fields entry %d", i) + default: + return Managed{}, fmt.Errorf("invalid fieldsType %q in managed fields entry %d", encodedVersionedSet.FieldsType, i) + } manager, err := BuildManagerIdentifier(&encodedVersionedSet) if err != nil { return Managed{}, fmt.Errorf("error decoding manager from %v: %v", encodedVersionedSet, err) diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/managedfields_test.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/managedfields_test.go index 79e6c1464918a..8bd852c444f14 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/managedfields_test.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal/managedfields_test.go @@ -27,6 +27,51 @@ import ( "sigs.k8s.io/yaml" ) +// TestHasFieldsType makes sure that we fail if we don't have a +// FieldsType set properly. +func TestHasFieldsType(t *testing.T) { + var unmarshaled []metav1.ManagedFieldsEntry + if err := yaml.Unmarshal([]byte(`- apiVersion: v1 + fieldsType: FieldsV1 + fieldsV1: + f:field: {} + manager: foo + operation: Apply +`), &unmarshaled); err != nil { + t.Fatalf("did not expect yaml unmarshalling error but got: %v", err) + } + if _, err := decodeManagedFields(unmarshaled); err != nil { + t.Fatalf("did not expect decoding error but got: %v", err) + } + + // Invalid fieldsType V2. + if err := yaml.Unmarshal([]byte(`- apiVersion: v1 + fieldsType: FieldsV2 + fieldsV1: + f:field: {} + manager: foo + operation: Apply +`), &unmarshaled); err != nil { + t.Fatalf("did not expect yaml unmarshalling error but got: %v", err) + } + if _, err := decodeManagedFields(unmarshaled); err == nil { + t.Fatal("Expect decoding error but got none") + } + + // Missing fieldsType. 
+ if err := yaml.Unmarshal([]byte(`- apiVersion: v1 + fieldsV1: + f:field: {} + manager: foo + operation: Apply +`), &unmarshaled); err != nil { + t.Fatalf("did not expect yaml unmarshalling error but got: %v", err) + } + if _, err := decodeManagedFields(unmarshaled); err == nil { + t.Fatal("Expect decoding error but got none") + } +} + // TestRoundTripManagedFields will roundtrip ManagedFields from the wire format // (api format) to the format used by sigs.k8s.io/structured-merge-diff and back func TestRoundTripManagedFields(t *testing.T) { diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/patch.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/patch.go index 327978202739b..6e6d21feba78e 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/patch.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/patch.go @@ -321,9 +321,7 @@ func (p *jsonPatcher) applyPatchToCurrentObject(currentObject runtime.Object) (r } if p.fieldManager != nil { - if objToUpdate, err = p.fieldManager.Update(currentObject, objToUpdate, managerOrUserAgent(p.options.FieldManager, p.userAgent)); err != nil { - return nil, fmt.Errorf("failed to update object (json PATCH for %v) managed fields: %v", p.kind, err) - } + objToUpdate = p.fieldManager.UpdateNoErrors(currentObject, objToUpdate, managerOrUserAgent(p.options.FieldManager, p.userAgent)) } return objToUpdate, nil } @@ -406,9 +404,7 @@ func (p *smpPatcher) applyPatchToCurrentObject(currentObject runtime.Object) (ru } if p.fieldManager != nil { - if newObj, err = p.fieldManager.Update(currentObject, newObj, managerOrUserAgent(p.options.FieldManager, p.userAgent)); err != nil { - return nil, fmt.Errorf("failed to update object (smp PATCH for %v) managed fields: %v", p.kind, err) - } + newObj = p.fieldManager.UpdateNoErrors(currentObject, newObj, managerOrUserAgent(p.options.FieldManager, p.userAgent)) } return newObj, nil } diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/update.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/update.go index b7c2dbcd4f5c4..24339e5a85d7d 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/update.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/update.go @@ -126,11 +126,7 @@ func UpdateResource(r rest.Updater, scope *RequestScope, admit admission.Interfa transformers := []rest.TransformFunc{} if scope.FieldManager != nil { transformers = append(transformers, func(_ context.Context, newObj, liveObj runtime.Object) (runtime.Object, error) { - obj, err := scope.FieldManager.Update(liveObj, newObj, managerOrUserAgent(options.FieldManager, req.UserAgent())) - if err != nil { - return nil, fmt.Errorf("failed to update object (Update for %v) managed fields: %v", scope.Kind, err) - } - return obj, nil + return scope.FieldManager.UpdateNoErrors(liveObj, newObj, managerOrUserAgent(options.FieldManager, req.UserAgent())), nil }) } if mutatingAdmission, ok := admit.(admission.MutationInterface); ok { diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/BUILD b/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/BUILD index a2c4539ad7436..5bfc0ceeaf996 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/BUILD @@ -6,12 +6,6 @@ load( "go_test", ) -go_test( - name = "go_default_test", - srcs = ["metrics_test.go"], - embed = [":go_default_library"], -) - go_library( name = "go_default_library", srcs = ["metrics.go"], @@ -43,3 +37,9 @@ filegroup( srcs = [":package-srcs"], tags = 
["automanaged"], ) + +go_test( + name = "go_default_test", + srcs = ["metrics_test.go"], + embed = [":go_default_library"], +) diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go b/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go index 97993b1f5e044..dd1bac7891086 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go @@ -49,6 +49,8 @@ type resettableCollector interface { const ( APIServerComponent string = "apiserver" + OtherContentType string = "other" + OtherRequestMethod string = "other" ) /* @@ -214,6 +216,37 @@ var ( currentInflightRequests, requestTerminationsTotal, } + + // these are the known (e.g. whitelisted/known) content types which we will report for + // request metrics. Any other RFC compliant content types will be aggregated under 'unknown' + knownMetricContentTypes = utilsets.NewString( + "application/apply-patch+yaml", + "application/json", + "application/json-patch+json", + "application/merge-patch+json", + "application/strategic-merge-patch+json", + "application/vnd.kubernetes.protobuf", + "application/vnd.kubernetes.protobuf;stream=watch", + "application/yaml", + "text/plain", + "text/plain;charset=utf-8") + // these are the valid request methods which we report in our metrics. Any other request methods + // will be aggregated under 'unknown' + validRequestMethods = utilsets.NewString( + "APPLY", + "CONNECT", + "CREATE", + "DELETE", + "DELETECOLLECTION", + "GET", + "LIST", + "PATCH", + "POST", + "PROXY", + "PUT", + "UPDATE", + "WATCH", + "WATCHLIST") ) const ( @@ -261,6 +294,10 @@ func RecordRequestTermination(req *http.Request, requestInfo *request.RequestInf // translated to RequestInfo). // However, we need to tweak it e.g. to differentiate GET from LIST. 
verb := canonicalVerb(strings.ToUpper(req.Method), scope) + // set verbs to a bounded set of known and expected verbs + if !validRequestMethods.Has(verb) { + verb = OtherRequestMethod + } if requestInfo.IsResourceRequest { requestTerminationsTotal.WithLabelValues(cleanVerb(verb, req), requestInfo.APIGroup, requestInfo.APIVersion, requestInfo.Resource, requestInfo.Subresource, scope, component, codeToString(code)).Inc() } else { @@ -301,8 +338,9 @@ func MonitorRequest(req *http.Request, verb, group, version, resource, subresour client := "" elapsedMicroseconds := float64(elapsed / time.Microsecond) elapsedSeconds := elapsed.Seconds() - requestCounter.WithLabelValues(reportedVerb, dryRun, group, version, resource, subresource, scope, component, client, contentType, codeToString(httpCode)).Inc() - deprecatedRequestCounter.WithLabelValues(reportedVerb, group, version, resource, subresource, scope, component, client, contentType, codeToString(httpCode)).Inc() + cleanedContentType := cleanContentType(contentType) + requestCounter.WithLabelValues(reportedVerb, dryRun, group, version, resource, subresource, scope, component, client, cleanedContentType, codeToString(httpCode)).Inc() + deprecatedRequestCounter.WithLabelValues(reportedVerb, group, version, resource, subresource, scope, component, client, cleanedContentType, codeToString(httpCode)).Inc() requestLatencies.WithLabelValues(reportedVerb, dryRun, group, version, resource, subresource, scope, component).Observe(elapsedSeconds) deprecatedRequestLatencies.WithLabelValues(reportedVerb, group, version, resource, subresource, scope, component).Observe(elapsedMicroseconds) deprecatedRequestLatenciesSummary.WithLabelValues(reportedVerb, group, version, resource, subresource, scope, component).Observe(elapsedMicroseconds) @@ -359,6 +397,19 @@ func InstrumentHandlerFunc(verb, group, version, resource, subresource, scope, c } } +// cleanContentType binds the contentType (for metrics related purposes) to a +// bounded set of known/expected content-types. +func cleanContentType(contentType string) string { + normalizedContentType := strings.ToLower(contentType) + if strings.HasSuffix(contentType, " stream=watch") || strings.HasSuffix(contentType, " charset=utf-8") { + normalizedContentType = strings.ReplaceAll(contentType, " ", "") + } + if knownMetricContentTypes.Has(normalizedContentType) { + return normalizedContentType + } + return OtherContentType +} + // CleanScope returns the scope of the request. func CleanScope(requestInfo *request.RequestInfo) string { if requestInfo.Namespace != "" { @@ -403,7 +454,10 @@ func cleanVerb(verb string, request *http.Request) string { if verb == "PATCH" && request.Header.Get("Content-Type") == string(types.ApplyPatchType) && utilfeature.DefaultFeatureGate.Enabled(features.ServerSideApply) { reportedVerb = "APPLY" } - return reportedVerb + if validRequestMethods.Has(reportedVerb) { + return reportedVerb + } + return OtherRequestMethod } func cleanDryRun(u *url.URL) string { diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics_test.go b/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics_test.go index 4c0a8aa5d27de..603c932c5b47f 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics_test.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics_test.go @@ -16,7 +16,12 @@ limitations under the License. 
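The metrics changes above bound two client-controlled label inputs, the request method and the Content-Type header, to fixed sets; otherwise any client could mint new Prometheus label values, and therefore new time series, just by sending an unusual verb or content type. Below is a minimal sketch of the idea, assuming a small illustrative whitelist instead of the full knownMetricContentTypes set, and simplifying the space-stripping that the real cleanContentType applies only to specific suffixes:

```go
// Sketch of label-cardinality bounding, not the apiserver implementation.
package main

import (
	"fmt"
	"strings"
)

// An illustrative whitelist; the real set lists every content type the
// apiserver expects to serve or receive.
var knownContentTypes = map[string]bool{
	"application/json":         true,
	"application/yaml":         true,
	"text/plain;charset=utf-8": true,
}

// normalizeContentType lowercases the header, removes spaces (e.g. the one
// in "text/plain; charset=utf-8"), and folds anything outside the whitelist
// into the single bucket "other".
func normalizeContentType(ct string) string {
	n := strings.ReplaceAll(strings.ToLower(ct), " ", "")
	if knownContentTypes[n] {
		return n
	}
	return "other"
}

func main() {
	for _, ct := range []string{"application/json", "text/plain; charset=utf-8", "image/svg+xml"} {
		fmt.Printf("%-26s -> %s\n", ct, normalizeContentType(ct))
	}
}
```

The test cases added below exercise the real implementation's edge cases, such as "text/plain; charset=utf-8" normalizing to "text/plain;charset=utf-8" while "application/json;foo=bar" folds into "other".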
package metrics -import "testing" +import ( + "fmt" + "net/http" + "net/url" + "testing" +) func TestCleanUserAgent(t *testing.T) { panicBuf := []byte{198, 73, 129, 133, 90, 216, 104, 29, 13, 134, 209, 233, 30, 0, 22} @@ -52,3 +57,131 @@ func TestCleanUserAgent(t *testing.T) { } } } +func TestCleanVerb(t *testing.T) { + testCases := []struct { + desc string + initialVerb string + request *http.Request + expectedVerb string + }{ + { + desc: "An empty string should be designated as unknown", + initialVerb: "", + request: nil, + expectedVerb: "other", + }, + { + desc: "LIST should normally map to LIST", + initialVerb: "LIST", + request: nil, + expectedVerb: "LIST", + }, + { + desc: "LIST should be transformed to WATCH if we have the right query param on the request", + initialVerb: "LIST", + request: &http.Request{ + URL: &url.URL{ + RawQuery: "watch=true", + }, + }, + expectedVerb: "WATCH", + }, + { + desc: "LIST isn't transformed to WATCH if we have query params that do not include watch", + initialVerb: "LIST", + request: &http.Request{ + URL: &url.URL{ + RawQuery: "blah=asdf&something=else", + }, + }, + expectedVerb: "LIST", + }, + { + desc: "WATCHLIST should be transformed to WATCH", + initialVerb: "WATCHLIST", + request: nil, + expectedVerb: "WATCH", + }, + { + desc: "PATCH should be transformed to APPLY with the right content type", + initialVerb: "PATCH", + request: &http.Request{ + Header: http.Header{ + "Content-Type": []string{"application/apply-patch+yaml"}, + }, + }, + expectedVerb: "APPLY", + }, + { + desc: "PATCH shouldn't be transformed to APPLY without the right content type", + initialVerb: "PATCH", + request: nil, + expectedVerb: "PATCH", + }, + { + desc: "WATCHLIST should be transformed to WATCH", + initialVerb: "WATCHLIST", + request: nil, + expectedVerb: "WATCH", + }, + { + desc: "unexpected verbs should be designated as unknown", + initialVerb: "notValid", + request: nil, + expectedVerb: "other", + }, + } + for _, tt := range testCases { + t.Run(tt.initialVerb, func(t *testing.T) { + req := &http.Request{URL: &url.URL{}} + if tt.request != nil { + req = tt.request + } + cleansedVerb := cleanVerb(tt.initialVerb, req) + if cleansedVerb != tt.expectedVerb { + t.Errorf("Got %s, but expected %s", cleansedVerb, tt.expectedVerb) + } + }) + } +} + +func TestContentType(t *testing.T) { + testCases := []struct { + rawContentType string + expectedContentType string + }{ + { + rawContentType: "application/json", + expectedContentType: "application/json", + }, + { + rawContentType: "image/svg+xml", + expectedContentType: "other", + }, + { + rawContentType: "text/plain; charset=utf-8", + expectedContentType: "text/plain;charset=utf-8", + }, + { + rawContentType: "application/json;foo=bar", + expectedContentType: "other", + }, + { + rawContentType: "application/json;charset=hancoding", + expectedContentType: "other", + }, + { + rawContentType: "unknownbutvalidtype", + expectedContentType: "other", + }, + } + + for _, tt := range testCases { + t.Run(fmt.Sprintf("parse %s", tt.rawContentType), func(t *testing.T) { + cleansedContentType := cleanContentType(tt.rawContentType) + if cleansedContentType != tt.expectedContentType { + t.Errorf("Got %s, but expected %s", cleansedContentType, tt.expectedContentType) + } + }) + } +} diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go b/staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go index cacce27bc3515..35119e03e15e5 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go +++ 
b/staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go @@ -221,7 +221,7 @@ func (o *CertificateOptions) modifyCertificateCondition(builder *resource.Builde WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...). ContinueOnError(). FilenameParam(false, &o.FilenameOptions). - ResourceNames("certificatesigningrequest", o.csrNames...). + ResourceNames("certificatesigningrequests.v1beta1.certificates.k8s.io", o.csrNames...). RequireObject(true). Flatten(). Latest(). @@ -231,7 +231,10 @@ func (o *CertificateOptions) modifyCertificateCondition(builder *resource.Builde return err } for i := 0; ; i++ { - csr := info.Object.(*certificatesv1beta1.CertificateSigningRequest) + csr, ok := info.Object.(*certificatesv1beta1.CertificateSigningRequest) + if !ok { + return fmt.Errorf("can only handle certificates.k8s.io/v1beta1 certificate signing requests") + } csr, hasCondition := modify(csr) if !hasCondition || force { csr, err = clientSet.CertificateSigningRequests().UpdateApproval(csr) diff --git a/staging/src/k8s.io/legacy-cloud-providers/aws/aws.go b/staging/src/k8s.io/legacy-cloud-providers/aws/aws.go index 71a5dccace073..1d54ec20c3ce7 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/aws/aws.go +++ b/staging/src/k8s.io/legacy-cloud-providers/aws/aws.go @@ -25,6 +25,7 @@ import ( "io" "net" "path" + "regexp" "sort" "strconv" "strings" @@ -1202,7 +1203,13 @@ func azToRegion(az string) (string, error) { if len(az) < 1 { return "", fmt.Errorf("invalid (empty) AZ") } - region := az[:len(az)-1] + + r := regexp.MustCompile(`^([a-zA-Z]+-)+\d+`) + region := r.FindString(az) + if region == "" { + return "", fmt.Errorf("invalid AZ: %s", az) + } + return region, nil } diff --git a/staging/src/k8s.io/legacy-cloud-providers/aws/aws_test.go b/staging/src/k8s.io/legacy-cloud-providers/aws/aws_test.go index 005198d71d764..bf3883f2e7a3c 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/aws/aws_test.go +++ b/staging/src/k8s.io/legacy-cloud-providers/aws/aws_test.go @@ -1971,3 +1971,23 @@ func newMockedFakeAWSServices(id string) *FakeAWSServices { s.elb = &MockedFakeELB{FakeELB: s.elb.(*FakeELB)} return s } + +func TestAzToRegion(t *testing.T) { + testCases := []struct { + az string + region string + }{ + {"us-west-2a", "us-west-2"}, + {"us-west-2-lax-1a", "us-west-2"}, + {"ap-northeast-2a", "ap-northeast-2"}, + {"us-gov-east-1a", "us-gov-east-1"}, + {"us-iso-east-1a", "us-iso-east-1"}, + {"us-isob-east-1a", "us-isob-east-1"}, + } + + for _, testCase := range testCases { + result, err := azToRegion(testCase.az) + assert.NoError(t, err) + assert.Equal(t, testCase.region, result) + } +} diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_instances.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_instances.go index 418be9e3f4770..6af2d7baf1d55 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_instances.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_instances.go @@ -73,7 +73,7 @@ func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.N } if az.UseInstanceMetadata { - metadata, err := az.metadata.GetMetadata(cacheReadTypeUnsafe) + metadata, err := az.metadata.GetMetadata(cacheReadTypeDefault) if err != nil { return nil, err } @@ -259,7 +259,7 @@ func (az *Cloud) InstanceID(ctx context.Context, name types.NodeName) (string, e } if az.UseInstanceMetadata { - metadata, err := az.metadata.GetMetadata(cacheReadTypeUnsafe) + metadata, err := az.metadata.GetMetadata(cacheReadTypeDefault) if err != nil { return 
"", err } @@ -346,7 +346,7 @@ func (az *Cloud) InstanceType(ctx context.Context, name types.NodeName) (string, } if az.UseInstanceMetadata { - metadata, err := az.metadata.GetMetadata(cacheReadTypeUnsafe) + metadata, err := az.metadata.GetMetadata(cacheReadTypeDefault) if err != nil { return "", err } diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_instances_test.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_instances_test.go index bf17e63473c4a..885b61983a1e3 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_instances_test.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_instances_test.go @@ -351,5 +351,24 @@ func TestNodeAddresses(t *testing.T) { if !reflect.DeepEqual(ipAddresses, test.expected) { t.Errorf("Test [%s] unexpected ipAddresses: %s, expected %q", test.name, ipAddresses, test.expected) } + + // address should be get again from IMDS if it is not found in cache. + err = cloud.metadata.imsCache.Delete(metadataCacheKey) + if err != nil { + t.Errorf("Test [%s] unexpected error: %v", test.name, err) + } + ipAddresses, err = cloud.NodeAddresses(context.Background(), types.NodeName(test.nodeName)) + if test.expectError { + if err == nil { + t.Errorf("Test [%s] unexpected nil err", test.name) + } + } else { + if err != nil { + t.Errorf("Test [%s] unexpected error: %v", test.name, err) + } + } + if !reflect.DeepEqual(ipAddresses, test.expected) { + t.Errorf("Test [%s] unexpected ipAddresses: %s, expected %q", test.name, ipAddresses, test.expected) + } } } diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go index 70fb51e3112f7..b24ac08881a93 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go @@ -407,7 +407,7 @@ func (az *Cloud) getServiceLoadBalancerStatus(service *v1.Service, lb *network.L return nil, nil } isInternal := requiresInternalLoadBalancer(service) - lbFrontendIPConfigName := az.getFrontendIPConfigName(service, subnet(service)) + lbFrontendIPConfigName := az.getFrontendIPConfigName(service) serviceName := getServiceName(service) for _, ipConfiguration := range *lb.FrontendIPConfigurations { if lbFrontendIPConfigName == *ipConfiguration.Name { @@ -697,7 +697,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, } lbName := *lb.Name klog.V(2).Infof("reconcileLoadBalancer for service(%s): lb(%s) wantLb(%t) resolved load balancer name", serviceName, lbName, wantLb) - lbFrontendIPConfigName := az.getFrontendIPConfigName(service, subnet(service)) + lbFrontendIPConfigName := az.getFrontendIPConfigName(service) lbFrontendIPConfigID := az.getFrontendIPConfigID(lbName, lbFrontendIPConfigName) lbBackendPoolName := getBackendPoolName(clusterName, service) lbBackendPoolID := az.getBackendPoolID(lbName, lbBackendPoolName) @@ -1030,7 +1030,7 @@ func (az *Cloud) reconcileLoadBalancerRule( } for _, protocol := range protocols { - lbRuleName := az.getLoadBalancerRuleName(service, protocol, port.Port, subnet(service)) + lbRuleName := az.getLoadBalancerRuleName(service, protocol, port.Port) klog.V(2).Infof("reconcileLoadBalancerRule lb name (%s) rule name (%s)", lbName, lbRuleName) transportProto, _, probeProto, err := getProtocolsFromKubernetesProtocol(protocol) @@ -1084,15 +1084,9 @@ func (az *Cloud) reconcileLoadBalancerRule( BackendPort: to.Int32Ptr(port.Port), DisableOutboundSnat: 
to.BoolPtr(az.disableLoadBalancerOutboundSNAT()), EnableTCPReset: enableTCPReset, + EnableFloatingIP: to.BoolPtr(true), }, } - // LB does not support floating IPs for IPV6 rules - if utilnet.IsIPv6String(service.Spec.ClusterIP) { - expectedRule.BackendPort = to.Int32Ptr(port.NodePort) - expectedRule.EnableFloatingIP = to.BoolPtr(false) - } else { - expectedRule.EnableFloatingIP = to.BoolPtr(true) - } if protocol == v1.ProtocolTCP { expectedRule.LoadBalancingRulePropertiesFormat.IdleTimeoutInMinutes = lbIdleTimeout diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer_test.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer_test.go index ed9675d921538..62f1b1529ecd1 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer_test.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer_test.go @@ -362,7 +362,7 @@ func TestEnsureLoadBalancerDeleted(t *testing.T) { }{ { desc: "external service should be created and deleted successfully", - service: getTestService("test1", v1.ProtocolTCP, nil, 80), + service: getTestService("test1", v1.ProtocolTCP, nil, false, 80), }, { desc: "internal service should be created and deleted successfully", @@ -607,7 +607,7 @@ func TestGetServiceLoadBalancer(t *testing.T) { }, }, }, - service: getTestService("test1", v1.ProtocolTCP, nil, 80), + service: getTestService("test1", v1.ProtocolTCP, nil, false, 80), wantLB: false, expectedLB: &network.LoadBalancer{ Name: to.StringPtr("lb1"), @@ -628,7 +628,7 @@ func TestGetServiceLoadBalancer(t *testing.T) { }, { desc: "getServiceLoadBalancer shall report error if there're loadbalancer mode annotations on a standard lb", - service: getTestService("test1", v1.ProtocolTCP, nil, 80), + service: getTestService("test1", v1.ProtocolTCP, nil, false, 80), annotations: map[string]string{ServiceAnnotationLoadBalancerMode: "__auto__"}, sku: "standard", expectedExists: false, @@ -666,7 +666,7 @@ func TestGetServiceLoadBalancer(t *testing.T) { }, }, }, - service: getTestService("test1", v1.ProtocolTCP, nil, 80), + service: getTestService("test1", v1.ProtocolTCP, nil, false, 80), annotations: map[string]string{ServiceAnnotationLoadBalancerMode: "__auto__"}, wantLB: true, expectedLB: &network.LoadBalancer{ @@ -682,7 +682,7 @@ func TestGetServiceLoadBalancer(t *testing.T) { }, { desc: "getServiceLoadBalancer shall create a new lb otherwise", - service: getTestService("test1", v1.ProtocolTCP, nil, 80), + service: getTestService("test1", v1.ProtocolTCP, nil, false, 80), expectedLB: &network.LoadBalancer{ Name: to.StringPtr("testCluster"), Location: to.StringPtr("westus"), @@ -829,7 +829,7 @@ func TestIsFrontendIPChanged(t *testing.T) { }, }, lbFrontendIPConfigName: "btest1-name", - service: getTestService("test1", v1.ProtocolTCP, nil, 80), + service: getTestService("test1", v1.ProtocolTCP, nil, false, 80), expectedFlag: false, expectedError: false, }, @@ -842,7 +842,7 @@ func TestIsFrontendIPChanged(t *testing.T) { }, }, lbFrontendIPConfigName: "btest1-name", - service: getTestService("test1", v1.ProtocolTCP, nil, 80), + service: getTestService("test1", v1.ProtocolTCP, nil, false, 80), loadBalancerIP: "1.1.1.1", exsistingPIPs: []network.PublicIPAddress{ { @@ -865,7 +865,7 @@ func TestIsFrontendIPChanged(t *testing.T) { }, }, lbFrontendIPConfigName: "btest1-name", - service: getTestService("test1", v1.ProtocolTCP, nil, 80), + service: getTestService("test1", v1.ProtocolTCP, nil, false, 80), loadBalancerIP: "1.1.1.1", exsistingPIPs: 
[]network.PublicIPAddress{ { @@ -888,7 +888,7 @@ func TestIsFrontendIPChanged(t *testing.T) { }, }, lbFrontendIPConfigName: "btest1-name", - service: getTestService("test1", v1.ProtocolTCP, nil, 80), + service: getTestService("test1", v1.ProtocolTCP, nil, false, 80), loadBalancerIP: "1.1.1.1", exsistingPIPs: []network.PublicIPAddress{ { @@ -962,7 +962,7 @@ func TestDeterminePublicIPName(t *testing.T) { } for i, test := range testCases { az := getTestCloud() - service := getTestService("test1", v1.ProtocolTCP, nil, 80) + service := getTestService("test1", v1.ProtocolTCP, nil, false, 80) service.Spec.LoadBalancerIP = test.loadBalancerIP for _, existingPIP := range test.exsistingPIPs { _, err := az.PublicIPAddressesClient.CreateOrUpdate(context.TODO(), "rg", "test", existingPIP) @@ -988,12 +988,12 @@ func TestReconcileLoadBalancerRule(t *testing.T) { }{ { desc: "reconcileLoadBalancerRule shall return nil if wantLb is false", - service: getTestService("test1", v1.ProtocolTCP, nil, 80), + service: getTestService("test1", v1.ProtocolTCP, nil, false, 80), wantLb: false, }, { desc: "reconcileLoadBalancerRule shall return corresponding probe and lbRule(blb)", - service: getTestService("test1", v1.ProtocolTCP, map[string]string{"service.beta.kubernetes.io/azure-load-balancer-disable-tcp-reset": "true"}, 80), + service: getTestService("test1", v1.ProtocolTCP, map[string]string{"service.beta.kubernetes.io/azure-load-balancer-disable-tcp-reset": "true"}, false, 80), wantLb: true, expectedProbes: []network.Probe{ { @@ -1034,7 +1034,7 @@ func TestReconcileLoadBalancerRule(t *testing.T) { }, { desc: "reconcileLoadBalancerRule shall return corresponding probe and lbRule (slb without tcp reset)", - service: getTestService("test1", v1.ProtocolTCP, map[string]string{"service.beta.kubernetes.io/azure-load-balancer-disable-tcp-reset": "True"}, 80), + service: getTestService("test1", v1.ProtocolTCP, map[string]string{"service.beta.kubernetes.io/azure-load-balancer-disable-tcp-reset": "True"}, false, 80), loadBalancerSku: "standard", wantLb: true, expectedProbes: []network.Probe{ @@ -1076,7 +1076,7 @@ func TestReconcileLoadBalancerRule(t *testing.T) { }, { desc: "reconcileLoadBalancerRule shall return corresponding probe and lbRule(slb with tcp reset)", - service: getTestService("test1", v1.ProtocolTCP, nil, 80), + service: getTestService("test1", v1.ProtocolTCP, nil, false, 80), loadBalancerSku: "standard", wantLb: true, expectedProbes: []network.Probe{ @@ -1192,10 +1192,10 @@ func getTestLoadBalancer(name, clusterName, identifier *string, service v1.Servi } func TestReconcileLoadBalancer(t *testing.T) { - service1 := getTestService("test1", v1.ProtocolTCP, nil, 80) + service1 := getTestService("test1", v1.ProtocolTCP, nil, false, 80) basicLb1 := getTestLoadBalancer(to.StringPtr("lb1"), to.StringPtr("testCluster"), to.StringPtr("atest1"), service1, "Basic") - service2 := getTestService("test1", v1.ProtocolTCP, nil, 80) + service2 := getTestService("test1", v1.ProtocolTCP, nil, false, 80) basicLb2 := getTestLoadBalancer(to.StringPtr("lb1"), to.StringPtr("testCluster"), to.StringPtr("btest1"), service2, "Basic") basicLb2.Name = to.StringPtr("testCluster") basicLb2.FrontendIPConfigurations = &[]network.FrontendIPConfiguration{ @@ -1207,7 +1207,7 @@ func TestReconcileLoadBalancer(t *testing.T) { }, } - service3 := getTestService("test1", v1.ProtocolTCP, nil, 80) + service3 := getTestService("test1", v1.ProtocolTCP, nil, false, 80) modifiedLb1 := getTestLoadBalancer(to.StringPtr("testCluster"), 
to.StringPtr("testCluster"), to.StringPtr("atest1"), service3, "Basic") modifiedLb1.FrontendIPConfigurations = &[]network.FrontendIPConfiguration{ { @@ -1258,7 +1258,7 @@ func TestReconcileLoadBalancer(t *testing.T) { }, } - service4 := getTestService("test1", v1.ProtocolTCP, map[string]string{"service.beta.kubernetes.io/azure-load-balancer-disable-tcp-reset": "true"}, 80) + service4 := getTestService("test1", v1.ProtocolTCP, map[string]string{"service.beta.kubernetes.io/azure-load-balancer-disable-tcp-reset": "true"}, false, 80) existingSLB := getTestLoadBalancer(to.StringPtr("testCluster"), to.StringPtr("testCluster"), to.StringPtr("atest1"), service4, "Standard") existingSLB.FrontendIPConfigurations = &[]network.FrontendIPConfiguration{ { @@ -1310,7 +1310,7 @@ func TestReconcileLoadBalancer(t *testing.T) { }, } - service5 := getTestService("test1", v1.ProtocolTCP, nil, 80) + service5 := getTestService("test1", v1.ProtocolTCP, nil, false, 80) slb5 := getTestLoadBalancer(to.StringPtr("testCluster"), to.StringPtr("testCluster"), to.StringPtr("atest1"), service5, "Standard") slb5.FrontendIPConfigurations = &[]network.FrontendIPConfiguration{ { @@ -1364,7 +1364,7 @@ func TestReconcileLoadBalancer(t *testing.T) { }, } - service6 := getTestService("test1", v1.ProtocolUDP, nil, 80) + service6 := getTestService("test1", v1.ProtocolUDP, nil, false, 80) lb6 := getTestLoadBalancer(to.StringPtr("testCluster"), to.StringPtr("testCluster"), to.StringPtr("atest1"), service6, "basic") lb6.FrontendIPConfigurations = &[]network.FrontendIPConfiguration{} lb6.Probes = &[]network.Probe{} @@ -1383,7 +1383,7 @@ func TestReconcileLoadBalancer(t *testing.T) { }, } - service7 := getTestService("test1", v1.ProtocolUDP, nil, 80) + service7 := getTestService("test1", v1.ProtocolUDP, nil, false, 80) service7.Spec.HealthCheckNodePort = 10081 service7.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal lb7 := getTestLoadBalancer(to.StringPtr("testCluster"), to.StringPtr("testCluster"), to.StringPtr("atest1"), service7, "basic") @@ -1531,7 +1531,7 @@ func TestReconcileLoadBalancer(t *testing.T) { func TestGetServiceLoadBalancerStatus(t *testing.T) { az := getTestCloud() - service := getTestService("test1", v1.ProtocolTCP, nil, 80) + service := getTestService("test1", v1.ProtocolTCP, nil, false, 80) internalService := getInternalTestService("test1", 80) PIPClient := newFakeAzurePIPClient(az.Config.SubscriptionID) @@ -1628,7 +1628,7 @@ func TestGetServiceLoadBalancerStatus(t *testing.T) { }, { desc: "getServiceLoadBalancerStatus shall return nil if lb.FrontendIPConfigurations.name != " + - "az.getFrontendIPConfigName(service, subnet(service))", + "az.getFrontendIPConfigName(service)", service: &internalService, lb: &lb3, }, @@ -1690,25 +1690,25 @@ func TestReconcileSecurityGroup(t *testing.T) { }, { desc: "reconcileSecurityGroup shall report error if no such sg can be found", - service: getTestService("test1", v1.ProtocolTCP, nil, 80), + service: getTestService("test1", v1.ProtocolTCP, nil, false, 80), expectedError: true, }, { desc: "reconcileSecurityGroup shall report error if wantLb is true and lbIP is nil", - service: getTestService("test1", v1.ProtocolTCP, nil, 80), + service: getTestService("test1", v1.ProtocolTCP, nil, false, 80), wantLb: true, existingSgs: map[string]network.SecurityGroup{"nsg": {}}, expectedError: true, }, { desc: "reconcileSecurityGroup shall remain the existingSgs intact if nothing needs to be modified", - service: getTestService("test1", v1.ProtocolTCP, nil, 80), + service: 
getTestService("test1", v1.ProtocolTCP, nil, false, 80), existingSgs: map[string]network.SecurityGroup{"nsg": {}}, expectedSg: &network.SecurityGroup{}, }, { desc: "reconcileSecurityGroup shall delete unwanted sgs and create needed ones", - service: getTestService("test1", v1.ProtocolTCP, nil, 80), + service: getTestService("test1", v1.ProtocolTCP, nil, false, 80), existingSgs: map[string]network.SecurityGroup{"nsg": { Name: to.StringPtr("nsg"), SecurityGroupPropertiesFormat: &network.SecurityGroupPropertiesFormat{ @@ -1748,6 +1748,36 @@ func TestReconcileSecurityGroup(t *testing.T) { }, }, }, + { + desc: "reconcileSecurityGroup shall create sgs with correct destinationPrefix for IPv6", + service: getTestService("test1", v1.ProtocolTCP, nil, true, 80), + existingSgs: map[string]network.SecurityGroup{"nsg": { + Name: to.StringPtr("nsg"), + SecurityGroupPropertiesFormat: &network.SecurityGroupPropertiesFormat{}, + }}, + lbIP: to.StringPtr("fd00::eef0"), + wantLb: true, + expectedSg: &network.SecurityGroup{ + Name: to.StringPtr("nsg"), + SecurityGroupPropertiesFormat: &network.SecurityGroupPropertiesFormat{ + SecurityRules: &[]network.SecurityRule{ + { + Name: to.StringPtr("atest1-TCP-80-Internet"), + SecurityRulePropertiesFormat: &network.SecurityRulePropertiesFormat{ + Protocol: network.SecurityRuleProtocol("Tcp"), + SourcePortRange: to.StringPtr("*"), + DestinationPortRange: to.StringPtr("80"), + SourceAddressPrefix: to.StringPtr("Internet"), + DestinationAddressPrefix: to.StringPtr("fd00::eef0"), + Access: network.SecurityRuleAccess("Allow"), + Priority: to.Int32Ptr(500), + Direction: network.SecurityRuleDirection("Inbound"), + }, + }, + }, + }, + }, + }, } for i, test := range testCases { @@ -1811,7 +1841,7 @@ func TestSafeDeletePublicIP(t *testing.T) { if err != nil { t.Fatalf("TestCase[%d] meets unexpected error: %v", i, err) } - service := getTestService("test1", v1.ProtocolTCP, nil, 80) + service := getTestService("test1", v1.ProtocolTCP, nil, false, 80) err = az.safeDeletePublicIP(&service, "rg", test.pip, test.lb) assert.Equal(t, 0, len(*test.lb.FrontendIPConfigurations), "TestCase[%d]: %s", i, test.desc) assert.Equal(t, 0, len(*test.lb.LoadBalancingRules), "TestCase[%d]: %s", i, test.desc) @@ -1919,7 +1949,7 @@ func TestReconcilePublicIP(t *testing.T) { for i, test := range testCases { az := getTestCloud() - service := getTestService("test1", v1.ProtocolTCP, nil, 80) + service := getTestService("test1", v1.ProtocolTCP, nil, false, 80) service.Annotations = test.annotations for _, pip := range test.existingPIPs { _, err := az.PublicIPAddressesClient.CreateOrUpdate(context.TODO(), "rg", to.String(pip.Name), pip) @@ -2030,7 +2060,7 @@ func TestEnsurePublicIPExists(t *testing.T) { for i, test := range testCases { az := getTestCloud() - service := getTestService("test1", v1.ProtocolTCP, nil, 80) + service := getTestService("test1", v1.ProtocolTCP, nil, false, 80) for _, pip := range test.existingPIPs { _, err := az.PublicIPAddressesClient.CreateOrUpdate(context.TODO(), "rg", to.String(pip.Name), pip) if err != nil { @@ -2082,7 +2112,7 @@ func TestShouldUpdateLoadBalancer(t *testing.T) { for i, test := range testCases { az := getTestCloud() - service := getTestService("test1", v1.ProtocolTCP, nil, 80) + service := getTestService("test1", v1.ProtocolTCP, nil, false, 80) if test.lbHasDeletionTimestamp { service.ObjectMeta.DeletionTimestamp = &metav1.Time{time.Now()} } diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_managedDiskController.go 
b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_managedDiskController.go index 4f8251da87d66..fbd7fdbdf0a2c 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_managedDiskController.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_managedDiskController.go @@ -74,10 +74,12 @@ func (c *ManagedDiskController) CreateManagedDisk(options *ManagedDiskOptions) ( var err error klog.V(4).Infof("azureDisk - creating new managed disk Name:%s StorageAccountType:%s Size:%v", options.DiskName, options.StorageAccountType, options.SizeGB) - var createZones *[]string + var createZones []string if len(options.AvailabilityZone) > 0 { - zoneList := []string{c.common.cloud.GetZoneID(options.AvailabilityZone)} - createZones = &zoneList + requestedZone := c.common.cloud.GetZoneID(options.AvailabilityZone) + if requestedZone != "" { + createZones = append(createZones, requestedZone) + } } // insert original tags to newTags @@ -132,13 +134,16 @@ func (c *ManagedDiskController) CreateManagedDisk(options *ManagedDiskOptions) ( model := compute.Disk{ Location: &c.common.location, Tags: newTags, - Zones: createZones, Sku: &compute.DiskSku{ Name: diskSku, }, DiskProperties: &diskProperties, } + if len(createZones) > 0 { + model.Zones = &createZones + } + if options.ResourceGroup == "" { options.ResourceGroup = c.common.resourceGroup } diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_standard.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_standard.go index d8e2ef7f5b3a1..60000efb59fdd 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_standard.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_standard.go @@ -66,7 +66,9 @@ const ( nodeLabelRole = "kubernetes.io/role" nicFailedState = "Failed" - storageAccountNameMaxLength = 24 + storageAccountNameMaxLength = 24 + frontendIPConfigNameMaxLength = 80 + loadBalancerRuleNameMaxLength = 80 ) var errNotInVMSet = errors.New("vm is not in the vmset") @@ -275,12 +277,21 @@ func getBackendPoolName(clusterName string, service *v1.Service) string { return clusterName } -func (az *Cloud) getLoadBalancerRuleName(service *v1.Service, protocol v1.Protocol, port int32, subnetName *string) string { +func (az *Cloud) getLoadBalancerRuleName(service *v1.Service, protocol v1.Protocol, port int32) string { prefix := az.getRulePrefix(service) - if subnetName == nil { - return fmt.Sprintf("%s-%s-%d", prefix, protocol, port) + ruleName := fmt.Sprintf("%s-%s-%d", prefix, protocol, port) + subnet := subnet(service) + if subnet == nil { + return ruleName } - return fmt.Sprintf("%s-%s-%s-%d", prefix, *subnetName, protocol, port) + + // The load balancer rule name must be no longer than 80 characters; excluding the joining hyphen, the rule name and subnet segment together cannot exceed 79 characters + subnetSegment := *subnet + if len(ruleName)+len(subnetSegment)+1 > loadBalancerRuleNameMaxLength { + subnetSegment = subnetSegment[:loadBalancerRuleNameMaxLength-len(ruleName)-1] + } + + return fmt.Sprintf("%s-%s-%s-%d", prefix, subnetSegment, protocol, port) } func (az *Cloud) getSecurityRuleName(service *v1.Service, port v1.ServicePort, sourceAddrPrefix string) string { @@ -318,10 +329,17 @@ func (az *Cloud) serviceOwnsFrontendIP(fip network.FrontendIPConfiguration, serv return strings.HasPrefix(*fip.Name, baseName) } -func (az *Cloud) getFrontendIPConfigName(service *v1.Service, subnetName *string) string { +func (az *Cloud) getFrontendIPConfigName(service *v1.Service) string { baseName := az.GetLoadBalancerName(context.TODO(), "", service) + subnetName := 
subnet(service) if subnetName != nil { - return fmt.Sprintf("%s-%s", baseName, *subnetName) + ipcName := fmt.Sprintf("%s-%s", baseName, *subnetName) + + // Azure lb front end configuration name must not exceed 80 characters + if len(ipcName) > frontendIPConfigNameMaxLength { + ipcName = ipcName[:frontendIPConfigNameMaxLength] + } + return ipcName } return baseName } diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_standard_test.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_standard_test.go index 1b9b218328242..ce091dfbc0734 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_standard_test.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_standard_test.go @@ -19,6 +19,7 @@ limitations under the License. package azure import ( + "strconv" "testing" "github.com/stretchr/testify/assert" @@ -264,3 +265,156 @@ func TestGetAzureLoadBalancerName(t *testing.T) { assert.Equal(t, c.expected, loadbalancerName, c.description) } } + +func TestGetLoadBalancingRuleName(t *testing.T) { + az := getTestCloud() + az.PrimaryAvailabilitySetName = "primary" + + svc := &v1.Service{ + ObjectMeta: meta.ObjectMeta{ + Annotations: map[string]string{}, + UID: "257b9655-5137-4ad2-b091-ef3f07043ad3", + }, + } + + cases := []struct { + description string + subnetName string + isInternal bool + useStandardLB bool + protocol v1.Protocol + port int32 + expected string + }{ + { + description: "internal lb should have subnet name on the rule name", + subnetName: "shortsubnet", + isInternal: true, + useStandardLB: true, + protocol: v1.ProtocolTCP, + port: 9000, + expected: "a257b965551374ad2b091ef3f07043ad-shortsubnet-TCP-9000", + }, + { + description: "internal standard lb should have subnet name on the rule name but truncated to 80 characters", + subnetName: "averylonnnngggnnnnnnnnnnnnnnnnnnnnnngggggggggggggggggggggggggggggggggggggsubet", + isInternal: true, + useStandardLB: true, + protocol: v1.ProtocolTCP, + port: 9000, + expected: "a257b965551374ad2b091ef3f07043ad-averylonnnngggnnnnnnnnnnnnnnnnnnnnnngg-TCP-9000", + }, + { + description: "internal basic lb should have subnet name on the rule name but truncated to 80 characters", + subnetName: "averylonnnngggnnnnnnnnnnnnnnnnnnnnnngggggggggggggggggggggggggggggggggggggsubet", + isInternal: true, + useStandardLB: false, + protocol: v1.ProtocolTCP, + port: 9000, + expected: "a257b965551374ad2b091ef3f07043ad-averylonnnngggnnnnnnnnnnnnnnnnnnnnnngg-TCP-9000", + }, + { + description: "external standard lb should not have subnet name on the rule name", + subnetName: "shortsubnet", + isInternal: false, + useStandardLB: true, + protocol: v1.ProtocolTCP, + port: 9000, + expected: "a257b965551374ad2b091ef3f07043ad-TCP-9000", + }, + { + description: "external basic lb should not have subnet name on the rule name", + subnetName: "shortsubnet", + isInternal: false, + useStandardLB: false, + protocol: v1.ProtocolTCP, + port: 9000, + expected: "a257b965551374ad2b091ef3f07043ad-TCP-9000", + }, + } + + for _, c := range cases { + if c.useStandardLB { + az.Config.LoadBalancerSku = loadBalancerSkuStandard + } else { + az.Config.LoadBalancerSku = loadBalancerSkuBasic + } + svc.Annotations[ServiceAnnotationLoadBalancerInternalSubnet] = c.subnetName + svc.Annotations[ServiceAnnotationLoadBalancerInternal] = strconv.FormatBool(c.isInternal) + + loadbalancerRuleName := az.getLoadBalancerRuleName(svc, c.protocol, c.port) + assert.Equal(t, c.expected, loadbalancerRuleName, c.description) + } +} + +func TestGetFrontendIPConfigName(t *testing.T) 
{ + az := getTestCloud() + az.PrimaryAvailabilitySetName = "primary" + + svc := &v1.Service{ + ObjectMeta: meta.ObjectMeta{ + Annotations: map[string]string{ + ServiceAnnotationLoadBalancerInternalSubnet: "subnet", + ServiceAnnotationLoadBalancerInternal: "true", + }, + UID: "257b9655-5137-4ad2-b091-ef3f07043ad3", + }, + } + + cases := []struct { + description string + subnetName string + isInternal bool + useStandardLB bool + expected string + }{ + { + description: "internal lb should have subnet name on the frontend ip configuration name", + subnetName: "shortsubnet", + isInternal: true, + useStandardLB: true, + expected: "a257b965551374ad2b091ef3f07043ad-shortsubnet", + }, + { + description: "internal standard lb should have subnet name on the frontend ip configuration name but truncated to 80 characters", + subnetName: "averylonnnngggnnnnnnnnnnnnnnnnnnnnnngggggggggggggggggggggggggggggggggggggsubet", + isInternal: true, + useStandardLB: true, + expected: "a257b965551374ad2b091ef3f07043ad-averylonnnngggnnnnnnnnnnnnnnnnnnnnnnggggggggggg", + }, + { + description: "internal basic lb should have subnet name on the frontend ip configuration name but truncated to 80 characters", + subnetName: "averylonnnngggnnnnnnnnnnnnnnnnnnnnnngggggggggggggggggggggggggggggggggggggsubet", + isInternal: true, + useStandardLB: false, + expected: "a257b965551374ad2b091ef3f07043ad-averylonnnngggnnnnnnnnnnnnnnnnnnnnnnggggggggggg", + }, + { + description: "external standard lb should not have subnet name on the frontend ip configuration name", + subnetName: "shortsubnet", + isInternal: false, + useStandardLB: true, + expected: "a257b965551374ad2b091ef3f07043ad", + }, + { + description: "external basic lb should not have subnet name on the frontend ip configuration name", + subnetName: "shortsubnet", + isInternal: false, + useStandardLB: false, + expected: "a257b965551374ad2b091ef3f07043ad", + }, + } + + for _, c := range cases { + if c.useStandardLB { + az.Config.LoadBalancerSku = loadBalancerSkuStandard + } else { + az.Config.LoadBalancerSku = loadBalancerSkuBasic + } + svc.Annotations[ServiceAnnotationLoadBalancerInternalSubnet] = c.subnetName + svc.Annotations[ServiceAnnotationLoadBalancerInternal] = strconv.FormatBool(c.isInternal) + + ipconfigName := az.getFrontendIPConfigName(svc) + assert.Equal(t, c.expected, ipconfigName, c.description) + } +} diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_test.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_test.go index 13f2616c04cb3..524c2f3bb3e33 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_test.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_test.go @@ -120,7 +120,7 @@ func TestParseConfig(t *testing.T) { // Test flipServiceInternalAnnotation func TestFlipServiceInternalAnnotation(t *testing.T) { - svc := getTestService("servicea", v1.ProtocolTCP, nil, 80) + svc := getTestService("servicea", v1.ProtocolTCP, nil, false, 80) svcUpdated := flipServiceInternalAnnotation(&svc) if !requiresInternalLoadBalancer(svcUpdated) { t.Errorf("Expected svc to be an internal service") @@ -145,7 +145,7 @@ func TestFlipServiceInternalAnnotation(t *testing.T) {
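To make the 80-character cap concrete (it drives both truncation test tables above), here is a small illustrative sketch; truncateFrontendName is a hypothetical stand-in for the real getFrontendIPConfigName logic, not part of the patch:

// truncateFrontendName joins the base name and subnet with a hyphen and caps
// the result at 80 characters, mirroring frontendIPConfigNameMaxLength in
// azure_standard.go above.
func truncateFrontendName(baseName, subnetName string) string {
	name := fmt.Sprintf("%s-%s", baseName, subnetName)
	if len(name) > 80 {
		name = name[:80]
	}
	return name
}

// With the 32-character base "a257b965551374ad2b091ef3f07043ad", the hyphen
// leaves exactly 47 characters of the subnet name: 32 + 1 + 47 == 80, which is
// why the long-subnet cases above keep only a 47-character "averylon...ggg" tail.

// Test addition of a new service/port.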
func TestAddPort(t *testing.T) { az := getTestCloud() - svc := getTestService("servicea", v1.ProtocolTCP, nil, 80) + svc := getTestService("servicea", v1.ProtocolTCP, nil, false, 80) clusterResources := getClusterResources(az, 1, 1) svc.Spec.Ports = append(svc.Spec.Ports, v1.ServicePort{ @@ -200,7 +200,7 @@ func testLoadBalancerServiceDefaultModeSelection(t *testing.T, isInternal bool) svc = getInternalTestService(svcName, 8081) addTestSubnet(t, az, &svc) } else { - svc = getTestService(svcName, v1.ProtocolTCP, nil, 8081) + svc = getTestService(svcName, v1.ProtocolTCP, nil, false, 8081) } lbStatus, err := az.EnsureLoadBalancer(context.TODO(), testClusterName, &svc, clusterResources.nodes) @@ -255,7 +255,7 @@ func testLoadBalancerServiceAutoModeSelection(t *testing.T, isInternal bool) { svc = getInternalTestService(svcName, 8081) addTestSubnet(t, az, &svc) } else { - svc = getTestService(svcName, v1.ProtocolTCP, nil, 8081) + svc = getTestService(svcName, v1.ProtocolTCP, nil, false, 8081) } setLoadBalancerAutoModeAnnotation(&svc) lbStatus, err := az.EnsureLoadBalancer(context.TODO(), testClusterName, &svc, clusterResources.nodes) @@ -318,7 +318,7 @@ func testLoadBalancerServicesSpecifiedSelection(t *testing.T, isInternal bool) { svc = getInternalTestService(svcName, 8081) addTestSubnet(t, az, &svc) } else { - svc = getTestService(svcName, v1.ProtocolTCP, nil, 8081) + svc = getTestService(svcName, v1.ProtocolTCP, nil, false, 8081) } lbMode := fmt.Sprintf("%s,%s", selectedAvailabilitySetName1, selectedAvailabilitySetName2) setLoadBalancerModeAnnotation(&svc, lbMode) @@ -360,7 +360,7 @@ func testLoadBalancerMaxRulesServices(t *testing.T, isInternal bool) { svc = getInternalTestService(svcName, 8081) addTestSubnet(t, az, &svc) } else { - svc = getTestService(svcName, v1.ProtocolTCP, nil, 8081) + svc = getTestService(svcName, v1.ProtocolTCP, nil, false, 8081) } lbStatus, err := az.EnsureLoadBalancer(context.TODO(), testClusterName, &svc, clusterResources.nodes) @@ -389,7 +389,7 @@ func testLoadBalancerMaxRulesServices(t *testing.T, isInternal bool) { svc = getInternalTestService(svcName, 8081) addTestSubnet(t, az, &svc) } else { - svc = getTestService(svcName, v1.ProtocolTCP, nil, 8081) + svc = getTestService(svcName, v1.ProtocolTCP, nil, false, 8081) } _, err := az.EnsureLoadBalancer(context.TODO(), testClusterName, &svc, clusterResources.nodes) if err == nil { @@ -419,7 +419,7 @@ func testLoadBalancerServiceAutoModeDeleteSelection(t *testing.T, isInternal boo svc = getInternalTestService(svcName, 8081) addTestSubnet(t, az, &svc) } else { - svc = getTestService(svcName, v1.ProtocolTCP, nil, 8081) + svc = getTestService(svcName, v1.ProtocolTCP, nil, false, 8081) } setLoadBalancerAutoModeAnnotation(&svc) lbStatus, err := az.EnsureLoadBalancer(context.TODO(), testClusterName, &svc, clusterResources.nodes) @@ -438,7 +438,7 @@ func testLoadBalancerServiceAutoModeDeleteSelection(t *testing.T, isInternal boo svc = getInternalTestService(svcName, 8081) addTestSubnet(t, az, &svc) } else { - svc = getTestService(svcName, v1.ProtocolTCP, nil, 8081) + svc = getTestService(svcName, v1.ProtocolTCP, nil, false, 8081) } setLoadBalancerAutoModeAnnotation(&svc) @@ -482,7 +482,7 @@ func TestReconcileLoadBalancerAddServiceOnInternalSubnet(t *testing.T) { func TestReconcileSecurityGroupFromAnyDestinationAddressPrefixToLoadBalancerIP(t *testing.T) { az := getTestCloud() - svc1 := getTestService("serviceea", v1.ProtocolTCP, nil, 80) + svc1 := getTestService("serviceea", v1.ProtocolTCP, nil, false, 80) 
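An aside on what these security-group tests pin down: reconcileSecurityGroup scopes each rule's destination to the service's load balancer IP when one is wanted and known, whereas the pre-Kubernetes 1.8 rules matched any destination. A minimal sketch of that selection, reduced to the behavior asserted here rather than the real implementation:

// destinationPrefix is an illustrative reduction, written as a closure so it
// reads in place; the real logic lives in reconcileSecurityGroup.
destinationPrefix := func(lbIP *string, wantLb bool) string {
	if wantLb && lbIP != nil {
		return *lbIP // e.g. "192.168.0.0" below, or "fd00::eef0" in the IPv6 case earlier
	}
	return "*" // legacy pre-1.8 behavior: any destination
}
_ = destinationPrefix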
svc1.Spec.LoadBalancerIP = "192.168.0.0" sg := getTestSecurityGroup(az) // Simulate a pre-Kubernetes 1.8 NSG, where we do not specify the destination address prefix @@ -499,7 +499,7 @@ func TestReconcileSecurityGroupFromAnyDestinationAddressPrefixToLoadBalancerIP(t func TestReconcileSecurityGroupDynamicLoadBalancerIP(t *testing.T) { az := getTestCloud() - svc1 := getTestService("servicea", v1.ProtocolTCP, nil, 80) + svc1 := getTestService("servicea", v1.ProtocolTCP, nil, false, 80) svc1.Spec.LoadBalancerIP = "" sg := getTestSecurityGroup(az) dynamicallyAssignedIP := "192.168.0.0" @@ -514,7 +514,7 @@ func TestReconcileSecurityGroupDynamicLoadBalancerIP(t *testing.T) { func TestReconcileLoadBalancerAddServicesOnMultipleSubnets(t *testing.T) { az := getTestCloud() clusterResources := getClusterResources(az, 1, 1) - svc1 := getTestService("service1", v1.ProtocolTCP, nil, 8081) + svc1 := getTestService("service1", v1.ProtocolTCP, nil, false, 8081) svc2 := getInternalTestService("service2", 8081) // Internal and External service cannot reside on the same LB resource @@ -580,7 +580,7 @@ func TestReconcileLoadBalancerEditServiceSubnet(t *testing.T) { func TestReconcileLoadBalancerNodeHealth(t *testing.T) { az := getTestCloud() clusterResources := getClusterResources(az, 1, 1) - svc := getTestService("servicea", v1.ProtocolTCP, nil, 80) + svc := getTestService("servicea", v1.ProtocolTCP, nil, false, 80) svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal svc.Spec.HealthCheckNodePort = int32(32456) @@ -601,7 +601,7 @@ func TestReconcileLoadBalancerNodeHealth(t *testing.T) { func TestReconcileLoadBalancerRemoveService(t *testing.T) { az := getTestCloud() clusterResources := getClusterResources(az, 1, 1) - svc := getTestService("servicea", v1.ProtocolTCP, nil, 80, 443) + svc := getTestService("servicea", v1.ProtocolTCP, nil, false, 80, 443) lb, err := az.reconcileLoadBalancer(testClusterName, &svc, clusterResources.nodes, true /* wantLb */) if err != nil { @@ -625,7 +625,7 @@ func TestReconcileLoadBalancerRemoveService(t *testing.T) { func TestReconcileLoadBalancerRemoveAllPortsRemovesFrontendConfig(t *testing.T) { az := getTestCloud() clusterResources := getClusterResources(az, 1, 1) - svc := getTestService("servicea", v1.ProtocolTCP, nil, 80) + svc := getTestService("servicea", v1.ProtocolTCP, nil, false, 80) lb, err := az.reconcileLoadBalancer(testClusterName, &svc, clusterResources.nodes, true /* wantLb */) if err != nil { @@ -633,7 +633,7 @@ func TestReconcileLoadBalancerRemoveAllPortsRemovesFrontendConfig(t *testing.T) } validateLoadBalancer(t, lb, svc) - svcUpdated := getTestService("servicea", v1.ProtocolTCP, nil) + svcUpdated := getTestService("servicea", v1.ProtocolTCP, nil, false) lb, err = az.reconcileLoadBalancer(testClusterName, &svcUpdated, clusterResources.nodes, false /* wantLb*/) if err != nil { t.Errorf("Unexpected error: %q", err) @@ -652,13 +652,13 @@ func TestReconcileLoadBalancerRemovesPort(t *testing.T) { az := getTestCloud() clusterResources := getClusterResources(az, 1, 1) - svc := getTestService("servicea", v1.ProtocolTCP, nil, 80, 443) + svc := getTestService("servicea", v1.ProtocolTCP, nil, false, 80, 443) lb, err := az.reconcileLoadBalancer(testClusterName, &svc, clusterResources.nodes, true /* wantLb */) if err != nil { t.Errorf("Unexpected error: %q", err) } - svcUpdated := getTestService("servicea", v1.ProtocolTCP, nil, 80) + svcUpdated := getTestService("servicea", v1.ProtocolTCP, nil, false, 80) lb, err = 
az.reconcileLoadBalancer(testClusterName, &svcUpdated, clusterResources.nodes, true /* wantLb */) if err != nil { t.Errorf("Unexpected error: %q", err) @@ -671,8 +671,8 @@ func TestReconcileLoadBalancerRemovesPort(t *testing.T) { func TestReconcileLoadBalancerMultipleServices(t *testing.T) { az := getTestCloud() clusterResources := getClusterResources(az, 1, 1) - svc1 := getTestService("servicea", v1.ProtocolTCP, nil, 80, 443) - svc2 := getTestService("serviceb", v1.ProtocolTCP, nil, 80) + svc1 := getTestService("servicea", v1.ProtocolTCP, nil, false, 80, 443) + svc2 := getTestService("serviceb", v1.ProtocolTCP, nil, false, 80) updatedLoadBalancer, err := az.reconcileLoadBalancer(testClusterName, &svc1, clusterResources.nodes, true /* wantLb */) if err != nil { @@ -698,7 +698,7 @@ func findLBRuleForPort(lbRules []network.LoadBalancingRule, port int32) (network func TestServiceDefaultsToNoSessionPersistence(t *testing.T) { az := getTestCloud() - svc := getTestService("service-sa-omitted", v1.ProtocolTCP, nil, 7170) + svc := getTestService("service-sa-omitted", v1.ProtocolTCP, nil, false, 7170) clusterResources := getClusterResources(az, 1, 1) lb, err := az.reconcileLoadBalancer(testClusterName, &svc, clusterResources.nodes, true /* wantLb */) @@ -718,7 +718,7 @@ func TestServiceDefaultsToNoSessionPersistence(t *testing.T) { func TestServiceRespectsNoSessionAffinity(t *testing.T) { az := getTestCloud() - svc := getTestService("service-sa-none", v1.ProtocolTCP, nil, 7170) + svc := getTestService("service-sa-none", v1.ProtocolTCP, nil, false, 7170) svc.Spec.SessionAffinity = v1.ServiceAffinityNone clusterResources := getClusterResources(az, 1, 1) @@ -741,7 +741,7 @@ func TestServiceRespectsNoSessionAffinity(t *testing.T) { func TestServiceRespectsClientIPSessionAffinity(t *testing.T) { az := getTestCloud() - svc := getTestService("service-sa-clientip", v1.ProtocolTCP, nil, 7170) + svc := getTestService("service-sa-clientip", v1.ProtocolTCP, nil, false, 7170) svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP clusterResources := getClusterResources(az, 1, 1) @@ -765,7 +765,7 @@ func TestServiceRespectsClientIPSessionAffinity(t *testing.T) { func TestReconcileSecurityGroupNewServiceAddsPort(t *testing.T) { az := getTestCloud() getTestSecurityGroup(az) - svc1 := getTestService("servicea", v1.ProtocolTCP, nil, 80) + svc1 := getTestService("servicea", v1.ProtocolTCP, nil, false, 80) clusterResources := getClusterResources(az, 1, 1) lb, _ := az.reconcileLoadBalancer(testClusterName, &svc1, clusterResources.nodes, true) lbStatus, _ := az.getServiceLoadBalancerStatus(&svc1, lb) @@ -797,8 +797,8 @@ func TestReconcileSecurityGroupNewInternalServiceAddsPort(t *testing.T) { func TestReconcileSecurityGroupRemoveService(t *testing.T) { az := getTestCloud() - service1 := getTestService("servicea", v1.ProtocolTCP, nil, 81) - service2 := getTestService("serviceb", v1.ProtocolTCP, nil, 82) + service1 := getTestService("servicea", v1.ProtocolTCP, nil, false, 81) + service2 := getTestService("serviceb", v1.ProtocolTCP, nil, false, 82) clusterResources := getClusterResources(az, 1, 1) lb, _ := az.reconcileLoadBalancer(testClusterName, &service1, clusterResources.nodes, true) @@ -819,11 +819,11 @@ func TestReconcileSecurityGroupRemoveService(t *testing.T) { func TestReconcileSecurityGroupRemoveServiceRemovesPort(t *testing.T) { az := getTestCloud() - svc := getTestService("servicea", v1.ProtocolTCP, nil, 80, 443) + svc := getTestService("servicea", v1.ProtocolTCP, nil, false, 80, 443) clusterResources := 
getClusterResources(az, 1, 1) sg := getTestSecurityGroup(az, svc) - svcUpdated := getTestService("servicea", v1.ProtocolTCP, nil, 80) + svcUpdated := getTestService("servicea", v1.ProtocolTCP, nil, false, 80) lb, _ := az.reconcileLoadBalancer(testClusterName, &svc, clusterResources.nodes, true) lbStatus, _ := az.getServiceLoadBalancerStatus(&svc, lb) @@ -837,7 +837,7 @@ func TestReconcileSecurityGroupRemoveServiceRemovesPort(t *testing.T) { func TestReconcileSecurityWithSourceRanges(t *testing.T) { az := getTestCloud() - svc := getTestService("servicea", v1.ProtocolTCP, nil, 80, 443) + svc := getTestService("servicea", v1.ProtocolTCP, nil, false, 80, 443) svc.Spec.LoadBalancerSourceRanges = []string{ "192.168.0.0/24", "10.0.0.0/32", @@ -864,7 +864,7 @@ func TestReconcileSecurityGroupEtagMismatch(t *testing.T) { cachedSG.Etag = to.StringPtr("1111111-0000-0000-0000-000000000000") az.nsgCache.Set(to.String(sg.Name), &cachedSG) - svc1 := getTestService("servicea", v1.ProtocolTCP, nil, 80) + svc1 := getTestService("servicea", v1.ProtocolTCP, nil, false, 80) clusterResources := getClusterResources(az, 1, 1) lb, _ := az.reconcileLoadBalancer(testClusterName, &svc1, clusterResources.nodes, true) lbStatus, _ := az.getServiceLoadBalancerStatus(&svc1, lb) @@ -877,7 +877,7 @@ func TestReconcileSecurityGroupEtagMismatch(t *testing.T) { func TestReconcilePublicIPWithNewService(t *testing.T) { az := getTestCloud() - svc := getTestService("servicea", v1.ProtocolTCP, nil, 80, 443) + svc := getTestService("servicea", v1.ProtocolTCP, nil, false, 80, 443) pip, err := az.reconcilePublicIP(testClusterName, &svc, "", true /* wantLb*/) if err != nil { @@ -898,7 +898,7 @@ func TestReconcilePublicIPWithNewService(t *testing.T) { func TestReconcilePublicIPRemoveService(t *testing.T) { az := getTestCloud() - svc := getTestService("servicea", v1.ProtocolTCP, nil, 80, 443) + svc := getTestService("servicea", v1.ProtocolTCP, nil, false, 80, 443) pip, err := az.reconcilePublicIP(testClusterName, &svc, "", true /* wantLb*/) if err != nil { @@ -939,7 +939,7 @@ func TestReconcilePublicIPWithExternalAndInternalSwitch(t *testing.T) { validatePublicIP(t, pip, &svc, true) // Update to external service - svcUpdated := getTestService("servicea", v1.ProtocolTCP, nil, 80) + svcUpdated := getTestService("servicea", v1.ProtocolTCP, nil, false, 80) pip, err = az.reconcilePublicIP(testClusterName, &svcUpdated, "", true /* wantLb*/) if err != nil { t.Errorf("Unexpected error: %q", err) @@ -1131,7 +1131,7 @@ func getBackendPort(port int32) int32 { return port + 10000 } -func getTestService(identifier string, proto v1.Protocol, annotations map[string]string, requestedPorts ...int32) v1.Service { +func getTestService(identifier string, proto v1.Protocol, annotations map[string]string, isIPv6 bool, requestedPorts ...int32) v1.Service { ports := []v1.ServicePort{} for _, port := range requestedPorts { ports = append(ports, v1.ServicePort{ @@ -1157,17 +1157,22 @@ func getTestService(identifier string, proto v1.Protocol, annotations map[string svc.Annotations = annotations } + svc.Spec.ClusterIP = "10.0.0.2" + if isIPv6 { + svc.Spec.ClusterIP = "fd00::1907" + } + return svc } func getInternalTestService(identifier string, requestedPorts ...int32) v1.Service { - svc := getTestService(identifier, v1.ProtocolTCP, nil, requestedPorts...) + svc := getTestService(identifier, v1.ProtocolTCP, nil, false, requestedPorts...) 
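An aside on the new isIPv6 parameter: as the getTestService change above shows, it works purely through the fake Service's ClusterIP, "fd00::1907" for IPv6 and "10.0.0.2" otherwise. How the provider detects the address family is not shown in this hunk; the check below is an assumption for illustration only (it would need the "net" package imported):

isIPv6Service := func(svc *v1.Service) bool {
	// Assumed illustration: an address with no IPv4 form is IPv6.
	return net.ParseIP(svc.Spec.ClusterIP).To4() == nil
}
_ = isIPv6Service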
svc.Annotations[ServiceAnnotationLoadBalancerInternal] = "true" return svc } func getResourceGroupTestService(identifier, resourceGroup, loadBalancerIP string, requestedPorts ...int32) v1.Service { - svc := getTestService(identifier, v1.ProtocolTCP, nil, requestedPorts...) + svc := getTestService(identifier, v1.ProtocolTCP, nil, false, requestedPorts...) svc.Spec.LoadBalancerIP = loadBalancerIP svc.Annotations[ServiceAnnotationLoadBalancerResourceGroup] = resourceGroup return svc @@ -1240,14 +1245,14 @@ func validateLoadBalancer(t *testing.T, loadBalancer *network.LoadBalancer, serv if len(svc.Spec.Ports) > 0 { expectedFrontendIPCount++ expectedFrontendIP := ExpectedFrontendIPInfo{ - Name: az.getFrontendIPConfigName(&svc, subnet(&svc)), + Name: az.getFrontendIPConfigName(&svc), Subnet: subnet(&svc), } expectedFrontendIPs = append(expectedFrontendIPs, expectedFrontendIP) } for _, wantedRule := range svc.Spec.Ports { expectedRuleCount++ - wantedRuleName := az.getLoadBalancerRuleName(&svc, wantedRule.Protocol, wantedRule.Port, subnet(&svc)) + wantedRuleName := az.getLoadBalancerRuleName(&svc, wantedRule.Protocol, wantedRule.Port) foundRule := false for _, actualRule := range *loadBalancer.LoadBalancingRules { if strings.EqualFold(*actualRule.Name, wantedRuleName) && @@ -1814,7 +1819,7 @@ func addTestSubnet(t *testing.T, az *Cloud, svc *v1.Service) { func TestIfServiceSpecifiesSharedRuleAndRuleDoesNotExistItIsCreated(t *testing.T) { az := getTestCloud() - svc := getTestService("servicesr", v1.ProtocolTCP, nil, 80) + svc := getTestService("servicesr", v1.ProtocolTCP, nil, false, 80) svc.Spec.LoadBalancerIP = "192.168.77.88" svc.Annotations[ServiceAnnotationSharedSecurityRule] = "true" @@ -1853,7 +1858,7 @@ func TestIfServiceSpecifiesSharedRuleAndRuleDoesNotExistItIsCreated(t *testing.T func TestIfServiceSpecifiesSharedRuleAndRuleExistsThenTheServicesPortAndAddressAreAdded(t *testing.T) { az := getTestCloud() - svc := getTestService("servicesr", v1.ProtocolTCP, nil, 80) + svc := getTestService("servicesr", v1.ProtocolTCP, nil, false, 80) svc.Spec.LoadBalancerIP = "192.168.77.88" svc.Annotations[ServiceAnnotationSharedSecurityRule] = "true" @@ -1906,11 +1911,11 @@ func TestIfServiceSpecifiesSharedRuleAndRuleExistsThenTheServicesPortAndAddressA func TestIfServicesSpecifySharedRuleButDifferentPortsThenSeparateRulesAreCreated(t *testing.T) { az := getTestCloud() - svc1 := getTestService("servicesr1", v1.ProtocolTCP, nil, 4444) + svc1 := getTestService("servicesr1", v1.ProtocolTCP, nil, false, 4444) svc1.Spec.LoadBalancerIP = "192.168.77.88" svc1.Annotations[ServiceAnnotationSharedSecurityRule] = "true" - svc2 := getTestService("servicesr2", v1.ProtocolTCP, nil, 8888) + svc2 := getTestService("servicesr2", v1.ProtocolTCP, nil, false, 8888) svc2.Spec.LoadBalancerIP = "192.168.33.44" svc2.Annotations[ServiceAnnotationSharedSecurityRule] = "true" @@ -1975,11 +1980,11 @@ func TestIfServicesSpecifySharedRuleButDifferentPortsThenSeparateRulesAreCreated func TestIfServicesSpecifySharedRuleButDifferentProtocolsThenSeparateRulesAreCreated(t *testing.T) { az := getTestCloud() - svc1 := getTestService("servicesr1", v1.ProtocolTCP, nil, 4444) + svc1 := getTestService("servicesr1", v1.ProtocolTCP, nil, false, 4444) svc1.Spec.LoadBalancerIP = "192.168.77.88" svc1.Annotations[ServiceAnnotationSharedSecurityRule] = "true" - svc2 := getTestService("servicesr2", v1.ProtocolUDP, nil, 4444) + svc2 := getTestService("servicesr2", v1.ProtocolUDP, nil, false, 4444) svc2.Spec.LoadBalancerIP = "192.168.77.88" 
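An aside that applies to this whole family of shared-rule tests: services annotated with ServiceAnnotationSharedSecurityRule appear to be consolidated into one NSG rule only when protocol, destination port, and source all match; varying any one of them, as these tests do, yields separate rules. A compact sketch of that keying, stated as an assumption drawn from the test names rather than from the implementation:

type sharedRuleKey struct { // hypothetical key, for illustration only
	protocol v1.Protocol
	port     int32
	source   string
}
// In this test svc1 (TCP/4444) and svc2 (UDP/4444) differ in protocol, so they
// map to distinct keys and therefore to separate rules; two TCP/4444 services
// with the same source would share a single key and a single rule.
_ = map[sharedRuleKey]string{
	{v1.ProtocolTCP, 4444, "Internet"}: "one shared rule",
	{v1.ProtocolUDP, 4444, "Internet"}: "a separate rule",
}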
svc2.Annotations[ServiceAnnotationSharedSecurityRule] = "true" @@ -2042,12 +2047,12 @@ func TestIfServicesSpecifySharedRuleButDifferentProtocolsThenSeparateRulesAreCre func TestIfServicesSpecifySharedRuleButDifferentSourceAddressesThenSeparateRulesAreCreated(t *testing.T) { az := getTestCloud() - svc1 := getTestService("servicesr1", v1.ProtocolTCP, nil, 80) + svc1 := getTestService("servicesr1", v1.ProtocolTCP, nil, false, 80) svc1.Spec.LoadBalancerIP = "192.168.77.88" svc1.Spec.LoadBalancerSourceRanges = []string{"192.168.12.0/24"} svc1.Annotations[ServiceAnnotationSharedSecurityRule] = "true" - svc2 := getTestService("servicesr2", v1.ProtocolTCP, nil, 80) + svc2 := getTestService("servicesr2", v1.ProtocolTCP, nil, false, 80) svc2.Spec.LoadBalancerIP = "192.168.33.44" svc2.Spec.LoadBalancerSourceRanges = []string{"192.168.34.0/24"} svc2.Annotations[ServiceAnnotationSharedSecurityRule] = "true" @@ -2113,15 +2118,15 @@ func TestIfServicesSpecifySharedRuleButDifferentSourceAddressesThenSeparateRules func TestIfServicesSpecifySharedRuleButSomeAreOnDifferentPortsThenRulesAreSeparatedOrConsoliatedByPort(t *testing.T) { az := getTestCloud() - svc1 := getTestService("servicesr1", v1.ProtocolTCP, nil, 4444) + svc1 := getTestService("servicesr1", v1.ProtocolTCP, nil, false, 4444) svc1.Spec.LoadBalancerIP = "192.168.77.88" svc1.Annotations[ServiceAnnotationSharedSecurityRule] = "true" - svc2 := getTestService("servicesr2", v1.ProtocolTCP, nil, 8888) + svc2 := getTestService("servicesr2", v1.ProtocolTCP, nil, false, 8888) svc2.Spec.LoadBalancerIP = "192.168.33.44" svc2.Annotations[ServiceAnnotationSharedSecurityRule] = "true" - svc3 := getTestService("servicesr3", v1.ProtocolTCP, nil, 4444) + svc3 := getTestService("servicesr3", v1.ProtocolTCP, nil, false, 4444) svc3.Spec.LoadBalancerIP = "192.168.99.11" svc3.Annotations[ServiceAnnotationSharedSecurityRule] = "true" @@ -2213,11 +2218,11 @@ func TestIfServicesSpecifySharedRuleButSomeAreOnDifferentPortsThenRulesAreSepara func TestIfServiceSpecifiesSharedRuleAndServiceIsDeletedThenTheServicesPortAndAddressAreRemoved(t *testing.T) { az := getTestCloud() - svc1 := getTestService("servicesr1", v1.ProtocolTCP, nil, 80) + svc1 := getTestService("servicesr1", v1.ProtocolTCP, nil, false, 80) svc1.Spec.LoadBalancerIP = "192.168.77.88" svc1.Annotations[ServiceAnnotationSharedSecurityRule] = "true" - svc2 := getTestService("servicesr2", v1.ProtocolTCP, nil, 80) + svc2 := getTestService("servicesr2", v1.ProtocolTCP, nil, false, 80) svc2.Spec.LoadBalancerIP = "192.168.33.44" svc2.Annotations[ServiceAnnotationSharedSecurityRule] = "true" @@ -2268,15 +2273,15 @@ func TestIfServiceSpecifiesSharedRuleAndServiceIsDeletedThenTheServicesPortAndAd func TestIfSomeServicesShareARuleAndOneIsDeletedItIsRemovedFromTheRightRule(t *testing.T) { az := getTestCloud() - svc1 := getTestService("servicesr1", v1.ProtocolTCP, nil, 4444) + svc1 := getTestService("servicesr1", v1.ProtocolTCP, nil, false, 4444) svc1.Spec.LoadBalancerIP = "192.168.77.88" svc1.Annotations[ServiceAnnotationSharedSecurityRule] = "true" - svc2 := getTestService("servicesr2", v1.ProtocolTCP, nil, 8888) + svc2 := getTestService("servicesr2", v1.ProtocolTCP, nil, false, 8888) svc2.Spec.LoadBalancerIP = "192.168.33.44" svc2.Annotations[ServiceAnnotationSharedSecurityRule] = "true" - svc3 := getTestService("servicesr3", v1.ProtocolTCP, nil, 4444) + svc3 := getTestService("servicesr3", v1.ProtocolTCP, nil, false, 4444) svc3.Spec.LoadBalancerIP = "192.168.99.11" svc3.Annotations[ServiceAnnotationSharedSecurityRule] = 
"true" @@ -2375,15 +2380,15 @@ func TestIfSomeServicesShareARuleAndOneIsDeletedItIsRemovedFromTheRightRule(t *t func TestIfServiceSpecifiesSharedRuleAndLastServiceIsDeletedThenRuleIsDeleted(t *testing.T) { az := getTestCloud() - svc1 := getTestService("servicesr1", v1.ProtocolTCP, nil, 4444) + svc1 := getTestService("servicesr1", v1.ProtocolTCP, nil, false, 4444) svc1.Spec.LoadBalancerIP = "192.168.77.88" svc1.Annotations[ServiceAnnotationSharedSecurityRule] = "true" - svc2 := getTestService("servicesr2", v1.ProtocolTCP, nil, 8888) + svc2 := getTestService("servicesr2", v1.ProtocolTCP, nil, false, 8888) svc2.Spec.LoadBalancerIP = "192.168.33.44" svc2.Annotations[ServiceAnnotationSharedSecurityRule] = "true" - svc3 := getTestService("servicesr3", v1.ProtocolTCP, nil, 4444) + svc3 := getTestService("servicesr3", v1.ProtocolTCP, nil, false, 4444) svc3.Spec.LoadBalancerIP = "192.168.99.11" svc3.Annotations[ServiceAnnotationSharedSecurityRule] = "true" @@ -2455,23 +2460,23 @@ func TestIfServiceSpecifiesSharedRuleAndLastServiceIsDeletedThenRuleIsDeleted(t func TestCanCombineSharedAndPrivateRulesInSameGroup(t *testing.T) { az := getTestCloud() - svc1 := getTestService("servicesr1", v1.ProtocolTCP, nil, 4444) + svc1 := getTestService("servicesr1", v1.ProtocolTCP, nil, false, 4444) svc1.Spec.LoadBalancerIP = "192.168.77.88" svc1.Annotations[ServiceAnnotationSharedSecurityRule] = "true" - svc2 := getTestService("servicesr2", v1.ProtocolTCP, nil, 8888) + svc2 := getTestService("servicesr2", v1.ProtocolTCP, nil, false, 8888) svc2.Spec.LoadBalancerIP = "192.168.33.44" svc2.Annotations[ServiceAnnotationSharedSecurityRule] = "true" - svc3 := getTestService("servicesr3", v1.ProtocolTCP, nil, 4444) + svc3 := getTestService("servicesr3", v1.ProtocolTCP, nil, false, 4444) svc3.Spec.LoadBalancerIP = "192.168.99.11" svc3.Annotations[ServiceAnnotationSharedSecurityRule] = "true" - svc4 := getTestService("servicesr4", v1.ProtocolTCP, nil, 4444) + svc4 := getTestService("servicesr4", v1.ProtocolTCP, nil, false, 4444) svc4.Spec.LoadBalancerIP = "192.168.22.33" svc4.Annotations[ServiceAnnotationSharedSecurityRule] = "false" - svc5 := getTestService("servicesr5", v1.ProtocolTCP, nil, 8888) + svc5 := getTestService("servicesr5", v1.ProtocolTCP, nil, false, 8888) svc5.Spec.LoadBalancerIP = "192.168.22.33" svc5.Annotations[ServiceAnnotationSharedSecurityRule] = "false" diff --git a/test/e2e/storage/testsuites/disruptive.go b/test/e2e/storage/testsuites/disruptive.go index 539741f8e11bd..7deee105df3db 100644 --- a/test/e2e/storage/testsuites/disruptive.go +++ b/test/e2e/storage/testsuites/disruptive.go @@ -139,8 +139,9 @@ func (s *disruptiveTestSuite) defineTests(driver TestDriver, pattern testpattern } for _, test := range disruptiveTestTable { - if test.runTestFile != nil { - func(t disruptiveTest) { + func(t disruptiveTest) { + if (pattern.VolMode == v1.PersistentVolumeBlock && t.runTestBlock != nil) || + (pattern.VolMode == v1.PersistentVolumeFilesystem && t.runTestFile != nil) { ginkgo.It(t.testItStmt, func() { init() defer cleanup() @@ -157,13 +158,14 @@ func (s *disruptiveTestSuite) defineTests(driver TestDriver, pattern testpattern l.pod, err = e2epod.CreateSecPodWithNodeSelection(l.cs, l.ns.Name, pvcs, inlineSources, false, "", false, false, framework.SELinuxLabel, nil, e2epod.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout) framework.ExpectNoError(err, "While creating pods for kubelet restart test") - if pattern.VolMode == v1.PersistentVolumeBlock { + if pattern.VolMode == 
v1.PersistentVolumeBlock && t.runTestBlock != nil { t.runTestBlock(l.cs, l.config.Framework, l.pod) - } else { + } + if pattern.VolMode == v1.PersistentVolumeFilesystem && t.runTestFile != nil { t.runTestFile(l.cs, l.config.Framework, l.pod) } }) - }(test) - } + } + }(test) } } diff --git a/test/e2e/storage/utils/utils.go b/test/e2e/storage/utils/utils.go index 217489bc5c4a7..3f709d55486da 100644 --- a/test/e2e/storage/utils/utils.go +++ b/test/e2e/storage/utils/utils.go @@ -319,18 +319,28 @@ func TestVolumeUnmapsFromDeletedPodWithForceOption(c clientset.Interface, f *fra nodeIP = nodeIP + ":22" // Creating command to check whether path exists - command := fmt.Sprintf("ls /var/lib/kubelet/pods/%s/volumeDevices/*/ | grep '.'", clientPod.UID) + podDirectoryCmd := fmt.Sprintf("ls /var/lib/kubelet/pods/%s/volumeDevices/*/ | grep '.'", clientPod.UID) if isSudoPresent(nodeIP, framework.TestContext.Provider) { - command = fmt.Sprintf("sudo sh -c \"%s\"", command) + podDirectoryCmd = fmt.Sprintf("sudo sh -c \"%s\"", podDirectoryCmd) + } + // Directories in the global directory have unpredictable names; device symlinks, + // however, have the same name as the pod UID. So just find anything with the pod UID name. + globalBlockDirectoryCmd := fmt.Sprintf("find /var/lib/kubelet/plugins -name %s", clientPod.UID) + if isSudoPresent(nodeIP, framework.TestContext.Provider) { + globalBlockDirectoryCmd = fmt.Sprintf("sudo sh -c \"%s\"", globalBlockDirectoryCmd) } ginkgo.By("Expecting the symlinks from PodDeviceMapPath to be found.") - result, err := e2essh.SSH(command, nodeIP, framework.TestContext.Provider) + result, err := e2essh.SSH(podDirectoryCmd, nodeIP, framework.TestContext.Provider) e2essh.LogResult(result) framework.ExpectNoError(err, "Encountered SSH error.") framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code)) - // TODO: Needs to check GetGlobalMapPath and descriptor lock, as well. + ginkgo.By("Expecting the symlinks from global map path to be found.") + result, err = e2essh.SSH(globalBlockDirectoryCmd, nodeIP, framework.TestContext.Provider) + e2essh.LogResult(result) + framework.ExpectNoError(err, "Encountered SSH error.") + framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected find exit code of 0, got %d", result.Code)) // This command is to make sure kubelet is started again after the test finishes, whether it fails or not. defer func() { @@ -359,12 +369,16 @@ func TestVolumeUnmapsFromDeletedPodWithForceOption(c clientset.Interface, f *fra } ginkgo.By("Expecting the symlink from PodDeviceMapPath not to be found.") - result, err = e2essh.SSH(command, nodeIP, framework.TestContext.Provider) + result, err = e2essh.SSH(podDirectoryCmd, nodeIP, framework.TestContext.Provider) e2essh.LogResult(result) framework.ExpectNoError(err, "Encountered SSH error.") gomega.Expect(result.Stdout).To(gomega.BeEmpty(), "Expected grep stdout to be empty.") - // TODO: Needs to check GetGlobalMapPath and descriptor lock, as well.
+ ginkgo.By("Expecting the symlinks from global map path not to be found.") + result, err = e2essh.SSH(globalBlockDirectoryCmd, nodeIP, framework.TestContext.Provider) + e2essh.LogResult(result) + framework.ExpectNoError(err, "Encountered SSH error.") + gomega.Expect(result.Stdout).To(gomega.BeEmpty(), "Expected find stdout to be empty.") e2elog.Logf("Volume unmaped on node %s", clientPod.Spec.NodeName) } diff --git a/test/e2e_node/remote/utils.go b/test/e2e_node/remote/utils.go index e7b1647a8fd27..023df3cf2e625 100644 --- a/test/e2e_node/remote/utils.go +++ b/test/e2e_node/remote/utils.go @@ -27,11 +27,11 @@ import ( // utils.go contains functions used across test suites. const ( - cniVersion = "v0.7.5" + cniVersion = "v0.8.6" cniArch = "amd64" cniDirectory = "cni/bin" // The CNI tarball places binaries under directory under "cni/bin". cniConfDirectory = "cni/net.d" - cniURL = "https://dl.k8s.io/network-plugins/cni-plugins-" + cniArch + "-" + cniVersion + ".tgz" + cniURL = "https://storage.googleapis.com/k8s-artifacts-cni/release/" + cniVersion + "/" + "cni-plugins-linux-" + cniArch + "-" + cniVersion + ".tgz" ) const cniConfig = `{ diff --git a/test/integration/apiserver/apply/apply_test.go b/test/integration/apiserver/apply/apply_test.go index a374e6e7bf181..29ef1236fc5d4 100644 --- a/test/integration/apiserver/apply/apply_test.go +++ b/test/integration/apiserver/apply/apply_test.go @@ -1271,6 +1271,236 @@ func TestClearManagedFieldsWithUpdate(t *testing.T) { } } +// TestErrorsDontFail +func TestErrorsDontFail(t *testing.T) { + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, genericfeatures.ServerSideApply, true)() + + _, client, closeFn := setup(t) + defer closeFn() + + // Tries to create with a managed fields that has an empty `fieldsType`. + _, err := client.CoreV1().RESTClient().Post(). + Namespace("default"). + Resource("configmaps"). + Param("fieldManager", "apply_test"). + Body([]byte(`{ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": { + "name": "test-cm", + "namespace": "default", + "managedFields": [{ + "manager": "apply_test", + "operation": "Apply", + "apiVersion": "v1", + "time": "2019-07-08T09:31:18Z", + "fieldsType": "", + "fieldsV1": {} + }], + "labels": { + "test-label": "test" + } + }, + "data": { + "key": "value" + } + }`)). + Do(). + Get() + if err != nil { + t.Fatalf("Failed to create object with empty fieldsType: %v", err) + } +} + +func TestErrorsDontFailUpdate(t *testing.T) { + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, genericfeatures.ServerSideApply, true)() + + _, client, closeFn := setup(t) + defer closeFn() + + _, err := client.CoreV1().RESTClient().Post(). + Namespace("default"). + Resource("configmaps"). + Param("fieldManager", "apply_test"). + Body([]byte(`{ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": { + "name": "test-cm", + "namespace": "default", + "labels": { + "test-label": "test" + } + }, + "data": { + "key": "value" + } + }`)). + Do(). + Get() + if err != nil { + t.Fatalf("Failed to create object: %v", err) + } + + _, err = client.CoreV1().RESTClient().Put(). + Namespace("default"). + Resource("configmaps"). + Name("test-cm"). + Param("fieldManager", "apply_test"). 
+ Body([]byte(`{ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": { + "name": "test-cm", + "namespace": "default", + "managedFields": [{ + "manager": "apply_test", + "operation": "Apply", + "apiVersion": "v1", + "time": "2019-07-08T09:31:18Z", + "fieldsType": "", + "fieldsV1": {} + }], + "labels": { + "test-label": "test" + } + }, + "data": { + "key": "value" + } + }`)). + Do(). + Get() + if err != nil { + t.Fatalf("Failed to update object with empty fieldsType: %v", err) + } +} + +func TestErrorsDontFailPatch(t *testing.T) { + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, genericfeatures.ServerSideApply, true)() + + _, client, closeFn := setup(t) + defer closeFn() + + _, err := client.CoreV1().RESTClient().Post(). + Namespace("default"). + Resource("configmaps"). + Param("fieldManager", "apply_test"). + Body([]byte(`{ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": { + "name": "test-cm", + "namespace": "default", + "labels": { + "test-label": "test" + } + }, + "data": { + "key": "value" + } + }`)). + Do(). + Get() + if err != nil { + t.Fatalf("Failed to create object: %v", err) + } + + _, err = client.CoreV1().RESTClient().Patch(types.JSONPatchType). + Namespace("default"). + Resource("configmaps"). + Name("test-cm"). + Param("fieldManager", "apply_test"). + Body([]byte(`[{"op": "replace", "path": "/metadata/managedFields", "value": [{ + "manager": "apply_test", + "operation": "Apply", + "apiVersion": "v1", + "time": "2019-07-08T09:31:18Z", + "fieldsType": "", + "fieldsV1": {} + }]}]`)). + Do(). + Get() + if err != nil { + t.Fatalf("Failed to patch object with empty FieldsType: %v", err) + } + +} + +// TestClearManagedFieldsWithUpdateEmptyList verifies it's possible to clear the managedFields by sending an empty list. +func TestClearManagedFieldsWithUpdateEmptyList(t *testing.T) { + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, genericfeatures.ServerSideApply, true)() + + _, client, closeFn := setup(t) + defer closeFn() + + _, err := client.CoreV1().RESTClient().Patch(types.ApplyPatchType). + Namespace("default"). + Resource("configmaps"). + Name("test-cm"). + Param("fieldManager", "apply_test"). + Body([]byte(`{ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": { + "name": "test-cm", + "namespace": "default", + "labels": { + "test-label": "test" + } + }, + "data": { + "key": "value" + } + }`)). + Do(). + Get() + if err != nil { + t.Fatalf("Failed to create object using Apply patch: %v", err) + } + + _, err = client.CoreV1().RESTClient().Put(). + Namespace("default"). + Resource("configmaps"). + Name("test-cm"). 
+ Body([]byte(`{ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": { + "name": "test-cm", + "namespace": "default", + "managedFields": [], + "labels": { + "test-label": "test" + } + }, + "data": { + "key": "value" + } + }`)).Do().Get() + if err != nil { + t.Fatalf("Failed to patch object: %v", err) + } + + object, err := client.CoreV1().RESTClient().Get().Namespace("default").Resource("configmaps").Name("test-cm").Do().Get() + if err != nil { + t.Fatalf("Failed to retrieve object: %v", err) + } + + accessor, err := meta.Accessor(object) + if err != nil { + t.Fatalf("Failed to get meta accessor: %v", err) + } + + if managedFields := accessor.GetManagedFields(); len(managedFields) != 0 { + t.Fatalf("Failed to clear managedFields, got: %v", managedFields) + } + + if labels := accessor.GetLabels(); len(labels) < 1 { + t.Fatalf("Expected other fields to stay untouched, got: %v", object) + } +} + var podBytes = []byte(` apiVersion: v1 kind: Pod