From 3e240013ecf80f48bb4584aa777d9707aa9aae97 Mon Sep 17 00:00:00 2001
From: hamistao
Date: Thu, 28 Nov 2024 17:04:33 -0300
Subject: [PATCH 01/10] api: Add `projects_limits_uplink_ips` extension

Signed-off-by: hamistao
---
 doc/api-extensions.md | 5 +++++
 shared/version/api.go | 1 +
 2 files changed, 6 insertions(+)

diff --git a/doc/api-extensions.md b/doc/api-extensions.md
index 31d79054b842..694140531e15 100644
--- a/doc/api-extensions.md
+++ b/doc/api-extensions.md
@@ -2552,3 +2552,8 @@ This adds support for listing network zones across all projects using the `all-p
 
 Adds support for instance root volumes to be attached to other instances as disk devices.
 Introduces the `/` syntax for the `source` property of disk devices.
+
+## `projects_limits_uplink_ips`
+
+Introduces per-project uplink IP limits for each available uplink network, adding the `limits.networks.uplink_ips.ipv4.NETWORK_NAME` and `limits.networks.uplink_ips.ipv6.NETWORK_NAME` configuration keys for projects with `features.networks` enabled.
+These keys define the maximum number of IPs on the uplink network named NETWORK_NAME that can be assigned as uplink IPs to entities inside the project. These entities can be other networks, network forwards or load balancers.
diff --git a/shared/version/api.go b/shared/version/api.go
index 278ecac464a2..db46490453ba 100644
--- a/shared/version/api.go
+++ b/shared/version/api.go
@@ -431,6 +431,7 @@ var APIExtensions = []string{
 	"network_get_target",
 	"network_zones_all_projects",
 	"instance_root_volume_attachment",
+	"projects_limits_uplink_ips",
 }
 
 // APIExtensionsCount returns the number of available API extensions.

From f9ab2859120f09c3e7620f36bf65c56815d96620 Mon Sep 17 00:00:00 2001
From: hamistao
Date: Mon, 2 Dec 2024 18:13:03 -0300
Subject: [PATCH 02/10] lxd/project/limits: Create `UplinkAddressQuotasExceeded`

Signed-off-by: hamistao
---
 lxd/project/limits/permissions.go | 97 +++++++++++++++++++++++++++++++
 1 file changed, 97 insertions(+)

diff --git a/lxd/project/limits/permissions.go b/lxd/project/limits/permissions.go
index e809c62f57c1..cc504b3a6a95 100644
--- a/lxd/project/limits/permissions.go
+++ b/lxd/project/limits/permissions.go
@@ -1661,3 +1661,100 @@ func CheckTarget(ctx context.Context, authorizer auth.Authorizer, r *http.Reques
 
 	return nil, "", nil
 }
+
+// uplinkIPLimits is a type used to help check uplink IP quota usage.
+type uplinkIPLimits struct {
+	quotaIPV4         int
+	quotaIPV6         int
+	usedIPV4Addresses int
+	usedIPV6Addresses int
+}
+
+// increment bumps the IPv4 and/or IPv6 usage counters.
+func (q *uplinkIPLimits) increment(incrementIPV4 bool, incrementIPV6 bool) {
+	if incrementIPV4 {
+		q.usedIPV4Addresses++
+	}
+
+	if incrementIPV6 {
+		q.usedIPV6Addresses++
+	}
+}
+
+// hasExceeded returns true only when both the IPv4 and the IPv6 usage counters exceed their quotas.
+func (q *uplinkIPLimits) hasExceeded() bool {
+	return q.usedIPV4Addresses > q.quotaIPV4 && q.usedIPV6Addresses > q.quotaIPV6
+}
+
+// UplinkAddressQuotasExceeded checks whether the number of uplink addresses currently used in project
+// projectName on network networkName is higher than the provided quota for each IP protocol.
+// Uplink addresses can be consumed by load balancers, network forwards and networks.
+// For simplicity, this function assumes both limits are provided and returns early once both provided
+// quotas are exceeded. If one of the limits is not of interest to the caller, pass -1 for it and ignore
+// the result for that protocol.
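+//
+// For example, a caller that only cares about the IPv4 quota (an illustrative quota of 3 here)
+// would pass -1 as the IPv6 quota and discard the IPv6 result:
+//
+//	v4Exceeded, _, err := UplinkAddressQuotasExceeded(ctx, tx, projectName, networkName, 3, -1)
+//	if err == nil && v4Exceeded {
+//		// The project already uses more than 3 IPv4 uplink addresses on this uplink network.
+//	}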
+func UplinkAddressQuotasExceeded(ctx context.Context, tx *db.ClusterTx, projectName string, networkName string, uplinkIPV4Quota int, uplinkIPV6Quota int) (V4QuotaExceeded bool, V6QuotaExceeded bool, err error) {
+	quotas := uplinkIPLimits{
+		quotaIPV4: uplinkIPV4Quota,
+		quotaIPV6: uplinkIPV6Quota,
+	}
+
+	// If both provided quotas are negative, they are already exceeded (usage starts at zero), so return right away.
+	if quotas.hasExceeded() {
+		return true, true, nil
+	}
+
+	// First count uplink addresses used by the project's networks.
+	projectNetworks, err := tx.GetCreatedNetworksByProject(ctx, projectName)
+	if err != nil {
+		return false, false, err
+	}
+
+	for _, network := range projectNetworks {
+		// Check if each network is using our target network as an uplink.
+		if network.Config["network"] == networkName {
+			_, hasIPV6 := network.Config["volatile.network.ipv6.address"]
+			_, hasIPV4 := network.Config["volatile.network.ipv4.address"]
+			quotas.increment(hasIPV4, hasIPV6)
+			if quotas.hasExceeded() {
+				return true, true, nil
+			}
+		}
+	}
+
+	// Count listen addresses for network forwards.
+	forwardListenAddressesMap, err := tx.GetProjectNetworkForwardListenAddressesByUplink(ctx, networkName, false)
+	if err != nil {
+		return false, false, err
+	}
+
+	// Iterate through each network in the provided project while counting the uplink addresses used by their
+	// network forwards.
+	for _, addresses := range forwardListenAddressesMap[projectName] {
+		for _, address := range addresses {
+			isIPV6 := validate.IsNetworkAddressV6(address) == nil
+			quotas.increment(!isIPV6, isIPV6)
+			if quotas.hasExceeded() {
+				return true, true, nil
+			}
+		}
+	}
+
+	// Count listen addresses for load balancers.
+	loadBalancerAddressesMap, err := tx.GetProjectNetworkLoadBalancerListenAddressesByUplink(ctx, networkName, false)
+	if err != nil {
+		return false, false, err
+	}
+
+	// Iterate through each network in the provided project while counting the uplink addresses used by their
+	// load balancers.
+	for _, addresses := range loadBalancerAddressesMap[projectName] {
+		for _, address := range addresses {
+			isIPV6 := validate.IsNetworkAddressV6(address) == nil
+			quotas.increment(!isIPV6, isIPV6)
+			if quotas.hasExceeded() {
+				return true, true, nil
+			}
+		}
+	}
+
+	// At least one of the quotas was not exceeded.
+	return quotas.usedIPV4Addresses > quotas.quotaIPV4, quotas.usedIPV6Addresses > quotas.quotaIPV6, err
+}

From eae421600c3359c054bb6dcb135f1adba9265be1 Mon Sep 17 00:00:00 2001
From: hamistao
Date: Thu, 28 Nov 2024 16:45:18 -0300
Subject: [PATCH 03/10] lxd/api_project: Add `projectName` argument to `projectValidateConfig`

Signed-off-by: hamistao
---
 lxd/api_project.go | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/lxd/api_project.go b/lxd/api_project.go
index db30b8b2e02b..47d0fc735829 100644
--- a/lxd/api_project.go
+++ b/lxd/api_project.go
@@ -294,7 +294,7 @@ func projectsPost(d *Daemon, r *http.Request) response.Response {
 	}
 
 	// Validate the configuration.
-	err = projectValidateConfig(s, project.Config)
+	err = projectValidateConfig(s, project.Config, project.Name)
 	if err != nil {
 		return response.BadRequest(err)
 	}
@@ -682,7 +682,7 @@ func projectChange(s *state.State, project *api.Project, req api.ProjectPut) res
 	}
 
 	// Validate the configuration.
-	err := projectValidateConfig(s, req.Config)
+	err := projectValidateConfig(s, req.Config, project.Name)
 	if err != nil {
 		return response.BadRequest(err)
 	}
@@ -1024,7 +1024,7 @@ func isEitherAllowOrBlockOrManaged(value string) error {
 	return validate.Optional(validate.IsOneOf("block", "allow", "managed"))(value)
 }
 
-func projectValidateConfig(s *state.State, config map[string]string) error {
+func projectValidateConfig(s *state.State, config map[string]string, projectName string) error {
 	// Validate the project configuration.
 	projectConfigKeys := map[string]func(value string) error{
 		// lxdmeta:generate(entities=project; group=specific; key=backups.compression_algorithm)

From 8bde8004d3dd75f207bcf7c6b8e65255fb15f9fe Mon Sep 17 00:00:00 2001
From: hamistao
Date: Tue, 10 Dec 2024 16:16:35 -0300
Subject: [PATCH 04/10] lxd/api_project: Create `uplinkIPLimitValidator`

We check the current uplink IP usage in the validator function for two reasons:
- Show a more informative error message in case the provided value is not appropriate.
- Avoid doing the expensive computation of uplink IP usage unless a config key was provided for a valid uplink network.

Signed-off-by: hamistao
---
 lxd/api_project.go | 49 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 49 insertions(+)

diff --git a/lxd/api_project.go b/lxd/api_project.go
index 47d0fc735829..d77d0c64c3a4 100644
--- a/lxd/api_project.go
+++ b/lxd/api_project.go
@@ -9,6 +9,7 @@ import (
 	"net"
 	"net/http"
 	"net/url"
+	"strconv"
 	"strings"
 
 	"github.com/gorilla/mux"
@@ -952,6 +953,54 @@ func projectStateGet(d *Daemon, r *http.Request) response.Response {
 	return response.SyncResponse(true, &state)
 }
 
+// uplinkIPLimitValidator returns a validator function for uplink IP limits.
+// The protocol argument specifies whether we should validate ipv4 or ipv6.
+func uplinkIPLimitValidator(s *state.State, projectName string, networkName string, protocol string) func(string) error {
+	return func(value string) error {
+		// Perform cheaper checks on the value first.
+		providedIPQuota, err := strconv.Atoi(value)
+		if err != nil {
+			return err
+		}
+
+		if providedIPQuota < 0 {
+			return fmt.Errorf("Value must be non-negative")
+		}
+
+		// The result for the quota we are not interested in will be ignored in the end, so -1 is used
+		// for it here. This marks the irrelevant quota as exceeded from the start and prevents it from
+		// stopping UplinkAddressQuotasExceeded from returning early.
+		IPV4AddressQuota := -1
+		IPV6AddressQuota := -1
+
+		if protocol == "ipv6" {
+			IPV6AddressQuota = providedIPQuota
+		}
+
+		if protocol == "ipv4" {
+			IPV4AddressQuota = providedIPQuota
+		}
+
+		// Check whether the provided value is lower than the number of uplink addresses currently in use
+		// on the specified network by the provided project.
+		// We are only interested in the result for the desired protocol; the other always comes out as true.
+		err = s.DB.Cluster.Transaction(s.ShutdownCtx, func(ctx context.Context, tx *db.ClusterTx) error {
+			invalidIPV4Quota, invalidIPV6Quota, err := limits.UplinkAddressQuotasExceeded(ctx, tx, projectName, networkName, IPV4AddressQuota, IPV6AddressQuota)
+			if err != nil {
+				return err
+			}
+
+			if protocol == "ipv4" && invalidIPV4Quota || protocol == "ipv6" && invalidIPV6Quota {
+				return fmt.Errorf("Uplink %s limit %q is below current number of used uplink addresses", protocol, value)
+			}
+
+			return nil
+		})
+
+		return err
+	}
+}
+
 // Check if a project is empty.
func projectIsEmpty(ctx context.Context, project *cluster.Project, tx *db.ClusterTx) (bool, error) { instances, err := cluster.GetInstances(ctx, tx.Tx(), cluster.InstanceFilter{Project: &project.Name}) From 84dd9ab4be7b0ffb2f1b2519f4c478f4c2931fef Mon Sep 17 00:00:00 2001 From: hamistao Date: Fri, 29 Nov 2024 18:02:01 -0300 Subject: [PATCH 05/10] lxd/api_project: Introduce `limits.networks.uplink_ips.*` config keys to projects Signed-off-by: hamistao --- lxd/api_project.go | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/lxd/api_project.go b/lxd/api_project.go index d77d0c64c3a4..5db787b758dd 100644 --- a/lxd/api_project.go +++ b/lxd/api_project.go @@ -1463,6 +1463,36 @@ func projectValidateConfig(s *state.State, config map[string]string, projectName return fmt.Errorf("Failed loading storage pool names: %w", err) } + // Per-network project limits for uplink IPs only make sense for projects with their own networks. + if shared.IsTrue(config["features.networks"]) { + // Get networks that are allowed to be used as uplinks by this project. + allowedUplinkNetworks, err := network.AllowedUplinkNetworks(s, config) + if err != nil { + return err + } + + // Add network-specific config keys. + for _, networkName := range allowedUplinkNetworks { + // lxdmeta:generate(entities=project; group=limits; key=limits.networks.uplink_ips.ipv4.NETWORK_NAME) + // Maximum number of IPv4 addresses that this project can consume from the specified uplink network. + // This number of IPs can be consumed by networks, forwards and load balancers in this project. + // + // --- + // type: string + // shortdesc: Quota of IPv4 addresses from a specified uplink network that can be used by entities in this project + projectConfigKeys["limits.networks.uplink_ips.ipv4."+networkName] = validate.Optional(uplinkIPLimitValidator(s, projectName, networkName, "ipv4")) + + // lxdmeta:generate(entities=project; group=limits; key=limits.networks.uplink_ips.ipv6.NETWORK_NAME) + // Maximum number of IPv6 addresses that this project can consume from the specified uplink network. + // This number of IPs can be consumed by networks, forwards and load balancers in this project. 
+			//
+			// ---
+			// type: string
+			// shortdesc: Quota of IPv6 addresses from a specified uplink network that can be used by entities in this project
+			projectConfigKeys["limits.networks.uplink_ips.ipv6."+networkName] = validate.Optional(uplinkIPLimitValidator(s, projectName, networkName, "ipv6"))
+		}
+	}
+
 	for k, v := range config {
 		key := k

From b4e83feb10777b01c32855aecf4a8d18d5fd244f Mon Sep 17 00:00:00 2001
From: hamistao
Date: Mon, 9 Dec 2024 19:45:27 -0300
Subject: [PATCH 06/10] lxd/network/common: Create `projectUplinkIPQuotaAvailable`

Signed-off-by: hamistao
---
 lxd/network/driver_common.go | 23 +++++++++++++++++++++++
 1 file changed, 23 insertions(+)

diff --git a/lxd/network/driver_common.go b/lxd/network/driver_common.go
index a3f745b1e569..7df55e6e1429 100644
--- a/lxd/network/driver_common.go
+++ b/lxd/network/driver_common.go
@@ -15,6 +15,7 @@ import (
 	"github.com/canonical/lxd/lxd/db"
 	dbCluster "github.com/canonical/lxd/lxd/db/cluster"
 	"github.com/canonical/lxd/lxd/network/acl"
+	"github.com/canonical/lxd/lxd/project/limits"
 	"github.com/canonical/lxd/lxd/resources"
 	"github.com/canonical/lxd/lxd/state"
 	"github.com/canonical/lxd/shared"
@@ -829,6 +830,28 @@ func (n *common) bgpGetPeers(config map[string]string) []string {
 	return peers
 }
 
+// projectUplinkIPQuotaAvailable checks if a project has quota available to assign new uplink IPs on a certain uplink network.
+func (n *common) projectUplinkIPQuotaAvailable(ctx context.Context, tx *db.ClusterTx, p *api.Project, uplinkName string) (ipv4QuotaAvailable bool, ipv6QuotaAvailable bool, err error) {
+	rawIPV4Quota, hasIPV4Quota := p.Config["limits.networks.uplink_ips.ipv4."+uplinkName]
+	rawIPV6Quota, hasIPV6Quota := p.Config["limits.networks.uplink_ips.ipv6."+uplinkName]
+
+	// Will be 0 if the limit is not set.
+	ipv4AddressLimit, _ := strconv.Atoi(rawIPV4Quota)
+	ipv6AddressLimit, _ := strconv.Atoi(rawIPV6Quota)
+
+	var ipv4QuotaMet bool
+	var ipv6QuotaMet bool
+
+	// If limit-1 is already exceeded (that is, usage >= limit), then there is no quota left for a new address.
+	ipv4QuotaMet, ipv6QuotaMet, err = limits.UplinkAddressQuotasExceeded(ctx, tx, p.Name, uplinkName, ipv4AddressLimit-1, ipv6AddressLimit-1)
+	if err != nil {
+		return false, false, err
+	}
+
+	// Undefined quotas are always available.
+	return !hasIPV4Quota || !ipv4QuotaMet, !hasIPV6Quota || !ipv6QuotaMet, nil
+}
+
 // forwardValidate validates the forward request.
 func (n *common) forwardValidate(listenAddress net.IP, forward api.NetworkForwardPut) ([]*forwardPortMap, error) {
 	if listenAddress == nil {

From 21c95cab5d59e381d8b2a18f5c24df8ab62141c4 Mon Sep 17 00:00:00 2001
From: hamistao
Date: Tue, 10 Dec 2024 00:57:18 -0300
Subject: [PATCH 07/10] lxd/network/ovn: Check uplink IP limits on `allocateUplinkAddress`

Signed-off-by: hamistao
---
 lxd/network/driver_ovn.go | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/lxd/network/driver_ovn.go b/lxd/network/driver_ovn.go
index 0a8cacfe0282..900f9c03e4af 100644
--- a/lxd/network/driver_ovn.go
+++ b/lxd/network/driver_ovn.go
@@ -4731,6 +4731,8 @@ func (n *ovn) allocateUplinkAddress(listenIPAddress net.IP) (net.IP, error) {
 	// Load the project to get uplink network restrictions.
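+	// The project config is also needed to check the per-project uplink IP quotas
+	// (limits.networks.uplink_ips.*) before a new uplink address is allocated.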
 	var p *api.Project
 	var uplink *api.Network
+	var ipv4QuotaAvailable bool
+	var ipv6QuotaAvailable bool
 	err := n.state.DB.Cluster.Transaction(n.state.ShutdownCtx, func(ctx context.Context, tx *db.ClusterTx) error {
 		project, err := dbCluster.GetProject(ctx, tx.Tx(), n.project)
 		if err != nil {
@@ -4749,7 +4751,9 @@ func (n *ovn) allocateUplinkAddress(listenIPAddress net.IP) (net.IP, error) {
 			return fmt.Errorf("Failed to load uplink network %q: %w", n.config["network"], err)
 		}
-		return nil
+		// Check project quotas for uplink IPs on this uplink.
+		ipv4QuotaAvailable, ipv6QuotaAvailable, err = n.projectUplinkIPQuotaAvailable(ctx, tx, p, uplink.Name)
+		return err
 	})
 	if err != nil {
 		return nil, err
 	}
@@ -4766,6 +4770,13 @@ func (n *ovn) allocateUplinkAddress(listenIPAddress net.IP) (net.IP, error) {
 		return nil, err
 	}
 
+	usingIPV6 := listenIPAddress.To4() == nil
+
+	// If there is no quota available for the required protocol, return an error.
+	if usingIPV6 && !ipv6QuotaAvailable || !usingIPV6 && !ipv4QuotaAvailable {
+		return nil, fmt.Errorf("Project quota for uplink IPs on network %q is exhausted", uplink.Name)
+	}
+
 	// We're auto-allocating the external IP address if the given listen address is unspecified.
 	if listenIPAddress.IsUnspecified() {
 		// Retrieve the raw address from listenAddressNet.

From a0e8a622cf97adde008090d068fafc061e010dff Mon Sep 17 00:00:00 2001
From: hamistao
Date: Tue, 10 Dec 2024 01:28:08 -0300
Subject: [PATCH 08/10] lxd/network/ovn: Check uplink IP limits on `allocateUplinkPortIPs`

This is useful to prevent newly created OVN networks from exceeding the allowed
quota for uplink IPs in their project.

Signed-off-by: hamistao
---
 lxd/network/driver_ovn.go | 38 +++++++++++++++++++++++++++++++++++---
 1 file changed, 35 insertions(+), 3 deletions(-)

diff --git a/lxd/network/driver_ovn.go b/lxd/network/driver_ovn.go
index 900f9c03e4af..746c324ec4f9 100644
--- a/lxd/network/driver_ovn.go
+++ b/lxd/network/driver_ovn.go
@@ -1232,15 +1232,47 @@ func (n *ovn) allocateUplinkPortIPs(uplinkNet Network, routerMAC net.HardwareAdd
 	routerExtPortIPv4 := net.ParseIP(n.config[ovnVolatileUplinkIPv4])
 	routerExtPortIPv6 := net.ParseIP(n.config[ovnVolatileUplinkIPv6])
 
+	// Get project's config.
+	var p *api.Project
+	var ipv4QuotaAvailable bool
+	var ipv6QuotaAvailable bool
+
+	err = n.state.DB.Cluster.Transaction(n.state.ShutdownCtx, func(ctx context.Context, tx *db.ClusterTx) error {
+		dbProject, err := dbCluster.GetProject(ctx, tx.Tx(), n.project)
+		if err != nil {
+			return err
+		}
+
+		p, err = dbProject.ToAPI(ctx, tx.Tx())
+		if err != nil {
+			return err
+		}
+
+		// Check if we have quota available for the addresses we want to allocate for the new network.
+		ipv4QuotaAvailable, ipv6QuotaAvailable, err = n.projectUplinkIPQuotaAvailable(ctx, tx, p, uplinkNet.Name())
+		return err
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	allocatingIPV4 := uplinkIPv4Net != nil && routerExtPortIPv4 == nil
+	allocatingIPV6 := uplinkIPv6Net != nil && routerExtPortIPv6 == nil
+
+	// A quota check result is only relevant if we intend to allocate an IP for that quota's protocol.
+	if allocatingIPV4 && !ipv4QuotaAvailable || allocatingIPV6 && !ipv6QuotaAvailable {
+		return nil, fmt.Errorf("Project quota for uplink IPs on network %q is exhausted", uplinkNet.Name())
+	}
+
 	// Decide whether we need to allocate new IP(s) and go to the expense of retrieving all allocated IPs.
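+	// Addresses already recorded in the volatile uplink keys were allocated previously, so only newly
+	// allocated addresses are checked against the project's uplink IP quota above.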
-	if (uplinkIPv4Net != nil && routerExtPortIPv4 == nil) || (uplinkIPv6Net != nil && routerExtPortIPv6 == nil) {
+	if allocatingIPV4 || allocatingIPV6 {
 		err := n.state.DB.Cluster.Transaction(context.TODO(), func(ctx context.Context, tx *db.ClusterTx) error {
 			allAllocatedIPv4, allAllocatedIPv6, err := n.uplinkAllAllocatedIPs(ctx, tx, uplinkNet.Name())
 			if err != nil {
 				return fmt.Errorf("Failed to get all allocated IPs for uplink: %w", err)
 			}
 
-			if uplinkIPv4Net != nil && routerExtPortIPv4 == nil {
+			if allocatingIPV4 {
 				if uplinkNetConf["ipv4.ovn.ranges"] == "" {
 					return fmt.Errorf(`Missing required "ipv4.ovn.ranges" config key on uplink network`)
 				}
@@ -1267,7 +1299,7 @@ func (n *ovn) allocateUplinkPortIPs(uplinkNet Network, routerMAC net.HardwareAdd
 				n.config[ovnVolatileUplinkIPv4] = routerExtPortIPv4.String()
 			}
 
-			if uplinkIPv6Net != nil && routerExtPortIPv6 == nil {
+			if allocatingIPV6 {
 				// If IPv6 OVN ranges are specified by the uplink, allocate from them.
 				if uplinkNetConf["ipv6.ovn.ranges"] != "" {
 					dhcpSubnet := uplinkNet.DHCPv6Subnet()

From 46bf5931a4c4e4186c39a225f131683bf54f8177 Mon Sep 17 00:00:00 2001
From: hamistao
Date: Tue, 10 Dec 2024 07:35:15 -0300
Subject: [PATCH 09/10] make update-metadata

Signed-off-by: hamistao
---
 doc/metadata.txt                | 16 ++++++++++++++++
 lxd/metadata/configuration.json | 14 ++++++++++++++
 2 files changed, 30 insertions(+)

diff --git a/doc/metadata.txt b/doc/metadata.txt
index a29c93f568ea..0516a3859197 100644
--- a/doc/metadata.txt
+++ b/doc/metadata.txt
@@ -4053,6 +4053,22 @@ The value is the maximum value for the sum of the individual {config:option}`ins
 ```
 
+```{config:option} limits.networks.uplink_ips.ipv4.NETWORK_NAME project-limits
+:shortdesc: "Quota of IPv4 addresses from a specified uplink network that can be used by entities in this project"
+:type: "string"
+Maximum number of IPv4 addresses that this project can consume from the specified uplink network.
+This number of IPs can be consumed by networks, forwards and load balancers in this project.
+
+```
+
+```{config:option} limits.networks.uplink_ips.ipv6.NETWORK_NAME project-limits
+:shortdesc: "Quota of IPv6 addresses from a specified uplink network that can be used by entities in this project"
+:type: "string"
+Maximum number of IPv6 addresses that this project can consume from the specified uplink network.
+This number of IPs can be consumed by networks, forwards and load balancers in this project.
+
+```
+
 ```{config:option} limits.processes project-limits
 :shortdesc: "Maximum number of processes within the project"
 :type: "integer"
diff --git a/lxd/metadata/configuration.json b/lxd/metadata/configuration.json
index 2d707fefb758..5b099c44ffac 100644
--- a/lxd/metadata/configuration.json
+++ b/lxd/metadata/configuration.json
@@ -4603,6 +4603,20 @@
                 "type": "integer"
             }
         },
+        {
+            "limits.networks.uplink_ips.ipv4.NETWORK_NAME": {
+                "longdesc": "Maximum number of IPv4 addresses that this project can consume from the specified uplink network.\nThis number of IPs can be consumed by networks, forwards and load balancers in this project.\n",
+                "shortdesc": "Quota of IPv4 addresses from a specified uplink network that can be used by entities in this project",
+                "type": "string"
+            }
+        },
+        {
+            "limits.networks.uplink_ips.ipv6.NETWORK_NAME": {
+                "longdesc": "Maximum number of IPv6 addresses that this project can consume from the specified uplink network.\nThis number of IPs can be consumed by networks, forwards and load balancers in this project.\n",
+                "shortdesc": "Quota of IPv6 addresses from a specified uplink network that can be used by entities in this project",
+                "type": "string"
+            }
+        },
         {
             "limits.processes": {
                 "longdesc": "This value is the maximum value for the sum of the individual {config:option}`instance-resource-limits:limits.processes` configurations set on the instances of the project.",

From 2e5ca4f50e83e53db3f5c16a0948b615c82dba31 Mon Sep 17 00:00:00 2001
From: hamistao
Date: Thu, 23 Jan 2025 21:33:56 -0300
Subject: [PATCH 10/10] test/suites: Add tests for project uplink IP limits

It has a couple of lines of unrelated tests thrown in as well, such as testing
that a forward/load balancer can't use listen addresses outside the uplink's
routes.

Signed-off-by: hamistao
---
 test/suites/network_ovn.sh | 143 +++++++++++++++++++++++++++++++++++++
 1 file changed, 143 insertions(+)

diff --git a/test/suites/network_ovn.sh b/test/suites/network_ovn.sh
index 14b0c1f9fdba..dfbaf8b6d5c8 100644
--- a/test/suites/network_ovn.sh
+++ b/test/suites/network_ovn.sh
@@ -235,6 +235,149 @@ test_network_ovn() {
   # Clean up.
   lxc delete c1 --force
   lxc network delete "${ovn_network}"
+
+  # Create project for following tests.
+  lxc project create testovn \
+    -c features.images=false \
+    -c features.profiles=false \
+    -c features.storage.volumes=false
+
+  lxc project switch testovn
+
+  # Project uplink IP limits are exclusive to projects with features.networks enabled.
+  ! lxc project set testovn limits.networks.uplink_ips.ipv4."${uplink_network}" 0 || false
+  ! lxc project set testovn limits.networks.uplink_ips.ipv6."${uplink_network}" 0 || false
+  lxc project set testovn features.networks true
+  lxc project set testovn limits.networks.uplink_ips.ipv4."${uplink_network}" 3
+  lxc project set testovn limits.networks.uplink_ips.ipv6."${uplink_network}" 3
+
+  # We cannot restrict a project with uplink IP limits set.
+  lxc project set testovn features.profiles true # Needed to restrict project
+  ! lxc project set testovn restricted true || false
+  lxc project unset testovn limits.networks.uplink_ips.ipv4."${uplink_network}"
+  lxc project unset testovn limits.networks.uplink_ips.ipv6."${uplink_network}"
+  lxc project set testovn restricted true
+
+  # We cannot set uplink IP limits on a restricted project unless the target network is in its allowed uplinks.
+  ! lxc project set testovn limits.networks.uplink_ips.ipv4."${uplink_network}" 1 || false
+  ! 
lxc project set testovn limits.networks.uplink_ips.ipv6."${uplink_network}" 1 || false + lxc project set testovn restricted.networks.uplinks="${uplink_network}" + lxc project set testovn limits.networks.uplink_ips.ipv4."${uplink_network}" 1 + lxc project set testovn limits.networks.uplink_ips.ipv6."${uplink_network}" 1 + + # Project uplink IP limits have to be non negative numbers. + ! lxc project set testovn limits.networks.uplink_ips.ipv4."${uplink_network}" true || false + ! lxc project set testovn limits.networks.uplink_ips.ipv4."${uplink_network}" something || false + ! lxc project set testovn limits.networks.uplink_ips.ipv4."${uplink_network}" -1 || false + ! lxc project set testovn limits.networks.uplink_ips.ipv6."${uplink_network}" true || false + ! lxc project set testovn limits.networks.uplink_ips.ipv6."${uplink_network}" something || false + ! lxc project set testovn limits.networks.uplink_ips.ipv6."${uplink_network}" -1 || false + + # Check project uplink IP limits are enforced on OVN network creation. + lxc network create first-ovn-network network="${uplink_network}" + ! lxc network create second-ovn-network network="${uplink_network}" --type=ovn || false + lxc network delete first-ovn-network + lxc network create second-ovn-network network="${uplink_network}" --type=ovn + + # Only when both limits are relaxed, we are able to create another network. + ! lxc network create failed-ovn-network --project testovn --type=ovn || false + lxc project unset testovn limits.networks.uplink_ips.ipv6."${uplink_network}" + ! lxc network create failed-ovn-network --project testovn --type=ovn || false + lxc project set testovn limits.networks.uplink_ips.ipv4."${uplink_network}" 2 + lxc network create third-ovn-network --project testovn --type=ovn + + # Cannot set uplink IP limits lower than the currently used uplink IPs. + lxc project set testovn limits.networks.uplink_ips.ipv4."${uplink_network}" 3 + lxc project set testovn limits.networks.uplink_ips.ipv6."${uplink_network}" 3 + ! lxc project set testovn limits.networks.uplink_ips.ipv4."${uplink_network}" 1 || false + ! lxc project set testovn limits.networks.uplink_ips.ipv6."${uplink_network}" 1 || false + lxc network delete third-ovn-network --project testovn + lxc project set testovn limits.networks.uplink_ips.ipv4."${uplink_network}" 1 + + # Cannot set uplink IP limits for a network that is not suitable to be an uplink. + ! lxc project set testovn limits.networks.uplink_ips.ipv4.non-existent 2 || false + ! lxc project set testovn limits.networks.uplink_ips.ipv6.non-existent 2 || false + ! lxc project set testovn limits.networks.uplink_ips.ipv4.third-ovn-network 2 || false + ! lxc project set testovn limits.networks.uplink_ips.ipv6.third-ovn-network 2 || false + + # A bit of cleanup. + lxc network delete second-ovn-network --project testovn + ! lxc project unset testovn restricted.networks.uplinks || false # Cannot unset while having limits set for the uplink network. + lxc project set testovn restricted false + lxc project unset testovn restricted.networks.uplinks + lxc project unset testovn limits.networks.uplink_ips.ipv4."${uplink_network}" + lxc project unset testovn limits.networks.uplink_ips.ipv6."${uplink_network}" + lxc project set testovn features.profiles false + + # Create an OVN network. 
+ project_ovn_network="project-ovn$$" + lxc network create "${project_ovn_network}" --type ovn network="${uplink_network}" \ + ipv4.address=10.24.140.1/24 ipv4.nat=true \ + ipv6.address=fd42:bd85:5f89:5293::1/64 ipv6.nat=true + + # No forward can be created with a listen address that is not in the uplink's routes + ! lxc network forward create "${project_ovn_network}" 192.0.3.1 || false + ! lxc network forward create "${project_ovn_network}" 2001:db8:1:3::1 || false + + # Create a couple of forwards without a target address. + lxc network forward create "${project_ovn_network}" 192.0.2.1 + lxc network forward create "${project_ovn_network}" 2001:db8:1:2::1 + [ "$(ovn-nbctl list load_balancer | grep -cF name)" = 0 ] + + # Cannot set uplink IP limits lower than the currently used uplink IPs. + # There is one ovn network created and one forward of each protocol, so 2 IPs in use for each protocol. + ! lxc project set testovn limits.networks.uplink_ips.ipv4."${uplink_network}" 1 || false + ! lxc project set testovn limits.networks.uplink_ips.ipv6."${uplink_network}" 1 || false + lxc project set testovn limits.networks.uplink_ips.ipv4."${uplink_network}" 2 + lxc project set testovn limits.networks.uplink_ips.ipv6."${uplink_network}" 2 + + # Check project uplink IP limits are enforced on network forward creation. + ! lxc network forward create "${project_ovn_network}" 192.0.2.2 || false + ! lxc network forward create "${project_ovn_network}" 2001:db8:1:2::2 || false + lxc project set testovn limits.networks.uplink_ips.ipv4."${uplink_network}" 3 + lxc project set testovn limits.networks.uplink_ips.ipv6."${uplink_network}" 3 + lxc network forward create "${project_ovn_network}" 192.0.2.2 + lxc network forward create "${project_ovn_network}" 2001:db8:1:2::2 + + # Clean up + lxc network forward delete "${project_ovn_network}" 192.0.2.2 + lxc network forward delete "${project_ovn_network}" 2001:db8:1:2::2 + lxc project unset testovn limits.networks.uplink_ips.ipv4."${uplink_network}" + lxc project unset testovn limits.networks.uplink_ips.ipv6."${uplink_network}" + lxc network forward delete "${project_ovn_network}" 192.0.2.1 + lxc network forward delete "${project_ovn_network}" 2001:db8:1:2::1 + + # No forward can be created with a listen address that is not in the uplink's routes + ! lxc network load-balancer create "${project_ovn_network}" 192.0.3.1 || false + ! lxc network load-balancer create "${project_ovn_network}" 2001:db8:1:3::1 || false + + # Create a couple of load balancers. + lxc network load-balancer create "${project_ovn_network}" 192.0.2.1 + lxc network load-balancer create "${project_ovn_network}" 2001:db8:1:2::1 + [ "$(ovn-nbctl list load_balancer | grep -cF name)" = 0 ] + + # Cannot set uplink IP limits lower than the currently used uplink IPs. + # There is one ovn network created and one load balancer for each protocol, so 2 IPs in use for each protocol. + ! lxc project set testovn limits.networks.uplink_ips.ipv4."${uplink_network}" 1 || false + ! lxc project set testovn limits.networks.uplink_ips.ipv6."${uplink_network}" 1 || false + lxc project set testovn limits.networks.uplink_ips.ipv4."${uplink_network}" 2 + lxc project set testovn limits.networks.uplink_ips.ipv6."${uplink_network}" 2 + + # Check project uplink IP limits are enforced on load balancer creation. + ! lxc network load-balancer create "${project_ovn_network}" 192.0.2.2 || false + ! 
lxc network load-balancer create "${project_ovn_network}" 2001:db8:1:2::2 || false + lxc project set testovn limits.networks.uplink_ips.ipv4."${uplink_network}" 3 + lxc project set testovn limits.networks.uplink_ips.ipv6."${uplink_network}" 3 + lxc network load-balancer create "${project_ovn_network}" 192.0.2.2 + lxc network load-balancer create "${project_ovn_network}" 2001:db8:1:2::2 + + # Clean up + lxc network load-balancer delete "${project_ovn_network}" 192.0.2.2 + lxc network load-balancer delete "${project_ovn_network}" 2001:db8:1:2::2 + lxc network delete "${project_ovn_network}" + lxc project switch default + lxc project delete testovn + lxc network delete "${uplink_network}" # Validate northbound database is now empty.