Add LB w/ IPAM Pool

albinsun authored and lanfon72 committed Apr 2, 2024
1 parent 1747c15 commit 2e947a7

Showing 6 changed files with 113 additions and 54 deletions.
1 change: 1 addition & 0 deletions apiclient/harvester_api/api.py
@@ -61,6 +61,7 @@ def load_managers(self, version="0.0.0"):
self.keypairs = mgrs.KeypairManager.for_version(version)(self, version)
self.images = mgrs.ImageManager.for_version(version)(self, version)
self.networks = mgrs.NetworkManager.for_version(version)(self, version)
self.ippools = mgrs.IPPoolManager.for_version(version)(self, version)
self.volumes = mgrs.VolumeManager.for_version(version)(self, version)
self.volsnapshots = mgrs.VolumeSnapshotManager.for_version(version)(self, version)
self.templates = mgrs.TemplateManager.for_version(version)(self, version)
4 changes: 2 additions & 2 deletions apiclient/harvester_api/managers/__init__.py
@@ -5,7 +5,7 @@
from .backups import BackupManager, VirtualMachineSnapshotManager
from .keypairs import KeypairManager
from .settings import SettingManager
from .networks import NetworkManager
from .networks import NetworkManager, IPPoolManager
from .templates import TemplateManager
from .supportbundles import SupportBundleManager
from .storageclasses import StorageClassManager
@@ -25,7 +25,7 @@
"VolumeManager",
"KeypairManager",
"SettingManager",
"NetworkManager",
"NetworkManager", "IPPoolManager",
"TemplateManager",
"SupportBundleManager",
"VolumeSnapshotManager",
41 changes: 41 additions & 0 deletions apiclient/harvester_api/managers/networks.py
@@ -66,3 +66,44 @@ def update(self, *args, **kwargs):
def delete(self, name, namespace=DEFAULT_NAMESPACE, *, raw=False):
path = self.PATH_fmt.format(uid=name, ns=namespace, NETWORK_API=self.API_VERSION)
return self._delete(path, raw=raw)


class IPPoolManager(BaseManager):
PATH_fmt = "{API_VERSION}/harvester/loadbalancer.harvesterhci.io.ippools{name}"
API_VERSION = "v1"

def create_data(self, name, ip_pool_subnet, network_id):
return {
"type": "loadbalancer.harvesterhci.io.ippool",
"metadata": {
"name": name
},
"spec": {
"ranges": [{
"subnet": ip_pool_subnet,
"gateway": "",
"type": "cidr"
}],
"selector": {
"network": network_id,
"scope": [{
"namespace": "*",
"project": "*",
"guestCluster": "*"
}]
}
}
}

def create(self, name, ip_pool_subnet, network_id, *, raw=False):
data = self.create_data(name, ip_pool_subnet, network_id)
path = self.PATH_fmt.format(name="", API_VERSION=self.API_VERSION)
return self._create(path, json=data, raw=raw)

def get(self, name="", *, raw=False):
path = self.PATH_fmt.format(name=f"/{name}", API_VERSION=self.API_VERSION)
return self._get(path, raw=raw)

def delete(self, name, *, raw=False):
path = self.PATH_fmt.format(name=f"/{name}", API_VERSION=self.API_VERSION)
return self._delete(path, raw=raw)
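
For reference, a minimal usage sketch of the new manager, assuming an already-authenticated `api_client` like the one the e2e fixtures construct (the client setup itself is not part of this commit; the pool name, subnet, and network ID below are placeholders):

```python
# Minimal sketch, assuming `api_client` exposes the managers loaded above.
code, data = api_client.ippools.create(
    "my-pool",            # IPPool name
    "192.168.0.0/24",     # becomes spec.ranges[0].subnet (type "cidr")
    "default/vlan1",      # namespaced NAD ID, matched by spec.selector.network
)
assert 201 == code, (code, data)

# PATH_fmt resolves to "v1/harvester/loadbalancer.harvesterhci.io.ippools/my-pool"
code, data = api_client.ippools.get("my-pool")
assert 200 == code, (code, data)

api_client.ippools.delete("my-pool")
```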
1 change: 1 addition & 0 deletions config.yml
@@ -10,6 +10,7 @@ host-private-key: ''
vlan-id: 1
# Physical NIC for VLAN. Default is "harvester-mgmt"
vlan-nic: 'harvester-mgmt'
ip-pool-subnet: '192.168.0.0/24'

# Wait time for polling operations
wait-timeout: 600
6 changes: 6 additions & 0 deletions harvester_e2e_tests/conftest.py
@@ -95,6 +95,12 @@ def pytest_addoption(parser):
default=config_data['vlan-nic'],
help='Physical NIC for VLAN. Default is "eth0"'
)
parser.addoption(
'--ip-pool-subnet',
action='store',
default=config_data['ip-pool-subnet'],
help='IP pool range for load balancer'
)
parser.addoption(
'--wait-timeout',
action='store',
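
The default comes from `config.yml` above; it can also be overridden per run, for example via pytest's Python entry point (a sketch; the subnet value is illustrative and should be unused on the test VLAN):

```python
# Equivalent to: pytest --ip-pool-subnet=192.168.100.0/24 <test path>
import pytest

pytest.main([
    "harvester_e2e_tests/integrations/test_9_rancher_integration.py",
    "--ip-pool-subnet", "192.168.100.0/24",  # illustrative subnet
])
```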
114 changes: 62 additions & 52 deletions harvester_e2e_tests/integrations/test_9_rancher_integration.py
@@ -15,16 +15,14 @@
# To contact SUSE about this file by physical or electronic mail,
# you may find current contact information at www.suse.com

import re
import os
import warnings

import pytest


pytest_plugins = [
'harvester_e2e_tests.fixtures.api_client',
'harvester_e2e_tests.fixtures.rancher_api_client',
"harvester_e2e_tests.fixtures.images",
]


@@ -45,32 +43,30 @@ def vlan_network(request, api_client):
f"Failed to create network-attachment-definition {network_name} \
with error {code}, {data}"
)
namespace = data['metadata']['namespace']
name = data['metadata']['name']

yield {
"id": data['metadata']['name']
"name": name,
"id": f"{namespace}/{name}"
}

api_client.networks.delete(network_name)
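
The fixture now yields both forms of the network identifier because this commit adds two consumers with different expectations: `IPPoolManager.create` takes the namespaced `namespace/name` ID for the pool selector, while the RKE machine configs below take the bare NAD name. With assumed example values:

```python
# Assumed values for a NAD named "vlan1" in the "default" namespace:
vlan_network = {
    "name": "vlan1",        # passed as network_id= in the RKE machine configs
    "id": "default/vlan1",  # passed to api_client.ippools.create() for the selector
}
```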


@pytest.fixture(scope="module")
def focal_image_url(request):
external = 'https://cloud-images.ubuntu.com/focal/current/'
base_url = request.config.getoption('--image-cache-url') or external
return os.path.join(base_url, "focal-server-cloudimg-amd64.img")
def ubuntu_image(api_client, unique_name, image_ubuntu, polling_for):
name = f"ubuntu-{unique_name}"


@pytest.fixture(scope="module")
def focal_image(api_client, unique_name, focal_image_url, polling_for):
code, data = api_client.images.create_by_url(unique_name, focal_image_url)
code, data = api_client.images.create_by_url(name, image_ubuntu.url)
assert 201 == code, (
f"Failed to upload focal image with error: {code}, {data}"
f"Failed to upload ubuntu image with error: {code}, {data}"
)

code, data = polling_for(
f"image {unique_name} to be ready",
f"image {name} to be ready",
lambda code, data: data.get('status', {}).get('progress', None) == 100,
api_client.images.get, unique_name
api_client.images.get, name
)
namespace = data['metadata']['namespace']
name = data['metadata']['name']
@@ -80,7 +76,7 @@ def focal_image(api_client, unique_name, focal_image_url, polling_for):
"id": f"{namespace}/{name}"
}

api_client.images.delete(name, namespace)
api_client.images.delete(name)


@pytest.fixture(scope='module')
@@ -90,7 +86,9 @@ def harvester_mgmt_cluster(api_client, rancher_api_client, unique_name, polling_
cluster_name = f"hvst-{unique_name}"

code, data = rancher_api_client.mgmt_clusters.create_harvester(cluster_name)
assert 201 == code, (code, data)
assert 201 == code, (
f"Failed to create Harvester entry {cluster_name} with error: {code}, {data}"
)

code, data = polling_for(
f"finding clusterName in MgmtCluster {cluster_name}",
@@ -99,8 +97,8 @@
)

yield {
"name": cluster_name, # e.g. myrke2 or myhvst ...
"id": data['status']['clusterName'] # e.g. c-m-n6bsktxb
"name": cluster_name,
"id": data['status']['clusterName'] # e.g. c-m-n6bsktxb
}

rancher_api_client.mgmt_clusters.delete(cluster_name)
@@ -132,32 +130,20 @@ def harvester_cloud_credential(api_client, rancher_api_client,


@pytest.fixture(scope='module')
def rke1_k8s_version(request, k8s_version, rancher_api_client):
def rke1_k8s_version(request, rancher_api_client):
configured = request.config.getoption("--RKE1-version")
if configured:
return configured

# `v1.24.11+rke2r1` -> `v1.24.11-rancher2-1`
version = re.sub(r'\+rke(\d+)r(\d+)', lambda g: "-rancher%s-%s" % g.groups(), k8s_version)

code, data = rancher_api_client.settings.get('k8s-versions-current')
assert 200 == code, (code, data)
current = data['value']
if version in current:
return version

code, data = rancher_api_client.settings.get('k8s-versions-deprecated')
assert 200 == code, (code, data)
if data['value'] and version in data['value']:
return version

latest = current.split(',')[-1]
assert 200 == code, (
f"Failed to get k8s-versions-current setting with error: {code}, {data}"
)
latest = data['value'].split(',')[-1]

warnings.warn(UserWarning(
f"Kubernetes version {version} is not in supported list,"
f" change to use latest version {latest} instead."
f"RKE1-version is not configured, use latest Rancher supported version {latest}."
))

return latest
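
The replacement logic simply takes the last entry of Rancher's comma-separated `k8s-versions-current` setting. A worked example, assuming a typical shape for the setting value (the versions shown are made up):

```python
# Assumed: the setting lists supported versions comma-separated, oldest first.
value = "v1.24.13-rancher2-1,v1.25.9-rancher2-1,v1.26.4-rancher2-1"
latest = value.split(',')[-1]
assert latest == "v1.26.4-rancher2-1"
```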


@@ -173,7 +159,7 @@ def rke1_cluster(unique_name, rancher_api_client, machine_count):
name = f"rke1-{unique_name}-{machine_count}"
yield {
"name": name,
"id": "", # set in Test_RKE1::test_create_rke1
"id": "", # set in Test_RKE1::test_create_rke1, e.g. c-m-n6bsktxb
"machine_count": machine_count
}

@@ -185,7 +171,7 @@ def rke2_cluster(unique_name, rancher_api_client, machine_count):
name = f"rke2-{unique_name}-{machine_count}"
yield {
"name": name,
"id": "", # set in Test_RKE2::test_create_rke2
"id": "", # set in Test_RKE2::test_create_rke2, e.g. c-m-n6bsktxb
"machine_count": machine_count
}

@@ -211,17 +197,35 @@ def nginx_deployment(unique_name):
}


@pytest.fixture(scope='class')
def lb_service(unique_name, nginx_deployment):
@pytest.fixture(scope="class")
def ip_pool(request, api_client, unique_name, vlan_network):
name = f"ippool-{unique_name}"
ip_pool_subnet = request.config.getoption('--ip-pool-subnet')

code, data = api_client.ippools.create(name, ip_pool_subnet, vlan_network["id"])
assert 201 == code, (
f"Failed to create ip pool {name} with error: {code}, {data}"
)

yield {
"name": name,
"subnet": ip_pool_subnet
}

api_client.ippools.delete(name)


@pytest.fixture(scope='class', params=["dhcp", "pool"])
def lb_service(request, unique_name, nginx_deployment, ip_pool):
namespace = "default"
name = f"lb-{unique_name}"
name = f"lb-{unique_name}-{request.param}"
data = {
"type": "service",
"metadata": {
"namespace": namespace,
"name": name,
"annotations": {
"cloudprovider.harvesterhci.io/ipam": "dhcp"
"cloudprovider.harvesterhci.io/ipam": request.param
}
},
"spec": {
Expand All @@ -241,7 +245,7 @@ def lb_service(unique_name, nginx_deployment):
}
}

return {
yield {
"namespace": namespace,
"name": name,
"data": data
Expand Down Expand Up @@ -332,7 +336,7 @@ def test_add_project_owner_user(api_client, rancher_api_client, unique_name, wai
class TestRKE2:
@pytest.mark.dependency(depends=["import_harvester"], name="create_rke2")
def test_create_rke2(self, rancher_api_client, unique_name, harvester_mgmt_cluster,
harvester_cloud_credential, rke2_cluster, focal_image, vlan_network,
harvester_cloud_credential, rke2_cluster, ubuntu_image, vlan_network,
k8s_version, rancher_wait_timeout, polling_for):
# Create Harvester kubeconfig for this RKE2 cluster
code, data = rancher_api_client.kube_configs.create(
@@ -369,9 +373,9 @@ def test_create_rke2(self, rancher_api_client, unique_name, harvester_mgmt_clust
cpus="2",
mems="4",
disks="40",
image_id=focal_image['id'],
network_id=vlan_network['id'],
ssh_user=focal_image['ssh_user'],
image_id=ubuntu_image['id'],
network_id=vlan_network['name'],
ssh_user=ubuntu_image['ssh_user'],
user_data=(
"#cloud-config\n"
"password: test\n"
@@ -595,6 +599,9 @@ def test_load_balancer_service(self, rancher_api_client, rke2_cluster, nginx_dep
f"Service data: {data}"
)

# teardown
rancher_api_client.cluster_services.delete(rke2_cluster['id'], lb_service["name"])

@pytest.mark.dependency(depends=["create_rke2"])
def test_delete_rke2(self, api_client, rancher_api_client, rke2_cluster,
rancher_wait_timeout, polling_for):
@@ -628,7 +635,7 @@ class TestRKE1:
def test_create_rke1(self, rancher_api_client, unique_name, harvester_mgmt_cluster,
rancher_wait_timeout,
rke1_cluster, rke1_k8s_version, harvester_cloud_credential,
focal_image, vlan_network, polling_for):
ubuntu_image, vlan_network, polling_for):
code, data = rancher_api_client.kube_configs.create(
rke1_cluster['name'],
harvester_mgmt_cluster['id']
@@ -642,9 +649,9 @@ def test_create_rke1(self, rancher_api_client, unique_name, harvester_mgmt_clust
cpus=2,
mems=4,
disks=40,
image_id=focal_image['id'],
network_id=vlan_network['id'],
ssh_user=focal_image['ssh_user'],
image_id=ubuntu_image['id'],
network_id=vlan_network['name'],
ssh_user=ubuntu_image['ssh_user'],
cloud_credential_id=harvester_cloud_credential['id'],
user_data=(
"#cloud-config\n"
@@ -849,6 +856,9 @@ def test_load_balancer_service(self, rancher_api_client, rke1_cluster, nginx_dep
f"Service data: {data}"
)

# teardown
rancher_api_client.cluster_services.delete(rke1_cluster['id'], lb_service["name"])

# harvester-csi-driver
@pytest.mark.dependency(depends=["create_rke1"], name="csi_driver_chart")
def test_csi_driver_chart(self, rancher_api_client, rke1_cluster, polling_for):
