Adding Networkpolicy to ClusterSets #125
@@ -6,3 +6,4 @@
.idea
.vscode/
__debug*
*-kubeconfig.yaml
@@ -0,0 +1,176 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
    controller-gen.kubebuilder.io/version: v0.14.0
  name: clustersets.k3k.io
spec:
  group: k3k.io
  names:
    kind: ClusterSet
    listKind: ClusterSetList
    plural: clustersets
    singular: clusterset
  scope: Namespaced
  versions:
  - name: v1alpha1
    schema:
      openAPIV3Schema:
        properties:
          apiVersion:
            description: |-
              APIVersion defines the versioned schema of this representation of an object.
              Servers should convert recognized schemas to the latest internal value, and
              may reject unrecognized values.
              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
            type: string
          kind:
            description: |-
              Kind is a string value representing the REST resource this object represents.
              Servers may infer this from the endpoint the client submits requests to.
              Cannot be updated.
              In CamelCase.
              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
            type: string
          metadata:
            type: object
          spec:
            description: Spec is the spec of the ClusterSet
            properties:
              defaultLimits:
                description: DefaultLimits are the limits used for servers/agents
                  when a cluster in the set doesn't provide any
                properties:
                  serverLimit:
                    additionalProperties:
                      anyOf:
                      - type: integer
                      - type: string
                      pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
                      x-kubernetes-int-or-string: true
                    description: ServerLimit is the limits (cpu/mem) that apply to
                      the server nodes
                    type: object
                  workerLimit:
                    additionalProperties:
                      anyOf:
                      - type: integer
                      - type: string
                      pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
                      x-kubernetes-int-or-string: true
                    description: WorkerLimit is the limits (cpu/mem) that apply to
                      the agent nodes
                    type: object
                type: object
              defaultNodeSelector:
                additionalProperties:
                  type: string
                description: DefaultNodeSelector is the node selector that applies
                  to all clusters (server + agent) in the set
                type: object
              maxLimits:
                additionalProperties:
                  anyOf:
                  - type: integer
                  - type: string
                  pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
                  x-kubernetes-int-or-string: true
                description: MaxLimits are the limits that apply to all clusters (server
                  + agent) in the set
                type: object
            type: object
          status:
            description: Status is the status of the ClusterSet
            properties:
              conditions:
                description: Conditions are the individual conditions for the cluster
                  set
                items:
                  description: "Condition contains details for one aspect of the current
                    state of this API Resource.\n---\nThis struct is intended for
                    direct use as an array at the field path .status.conditions. For
                    example,\n\n\n\ttype FooStatus struct{\n\t // Represents the
                    observations of a foo's current state.\n\t // Known .status.conditions.type
                    are: \"Available\", \"Progressing\", and \"Degraded\"\n\t //
                    +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t
                    \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\"
                    patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t
                    \ // other fields\n\t}"
                  properties:
                    lastTransitionTime:
                      description: |-
                        lastTransitionTime is the last time the condition transitioned from one status to another.
                        This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
                      format: date-time
                      type: string
                    message:
                      description: |-
                        message is a human readable message indicating details about the transition.
                        This may be an empty string.
                      maxLength: 32768
                      type: string
                    observedGeneration:
                      description: |-
                        observedGeneration represents the .metadata.generation that the condition was set based upon.
                        For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
                        with respect to the current state of the instance.
                      format: int64
                      minimum: 0
                      type: integer
                    reason:
                      description: |-
                        reason contains a programmatic identifier indicating the reason for the condition's last transition.
                        Producers of specific condition types may define expected values and meanings for this field,
                        and whether the values are considered a guaranteed API.
                        The value should be a CamelCase string.
                        This field may not be empty.
                      maxLength: 1024
                      minLength: 1
                      pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
                      type: string
                    status:
                      description: status of the condition, one of True, False, Unknown.
                      enum:
                      - "True"
                      - "False"
                      - Unknown
                      type: string
                    type:
                      description: |-
                        type of condition in CamelCase or in foo.example.com/CamelCase.
                        ---
                        Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be
                        useful (see .node.status.conditions), the ability to deconflict is important.
                        The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
                      maxLength: 316
                      pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
                      type: string
                  required:
                  - lastTransitionTime
                  - message
                  - reason
                  - status
                  - type
                  type: object
                type: array
              lastUpdateTime:
                description: LastUpdate is the timestamp when the status was last
                  updated
                type: string
              observedGeneration:
                description: ObservedGeneration was the generation at the time the
                  status was updated.
                format: int64
                type: integer
              summary:
                description: Summary is a summary of the status (error, ready)
                type: string
            type: object
        required:
        - spec
        type: object
    served: true
    storage: true
    subresources:
      status: {}
@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
  name: k3k-webhook
  labels:
    {{- include "k3k.labels" . | nindent 4 }}
  namespace: {{ .Values.namespace }}
spec:
  ports:
    - port: 443
      protocol: TCP
      name: https-webhook
      targetPort: 9443
  selector:
    {{- include "k3k.selectorLabels" . | nindent 6 }}
@@ -4,32 +4,45 @@ package main
import (
	"context"
	"flag"
	"os"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	"github.com/rancher/k3k/pkg/controller/cluster"
	"github.com/rancher/k3k/pkg/controller/clusterset"
	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/klog/v2"
	ctrl "sigs.k8s.io/controller-runtime"
	ctrlconfig "sigs.k8s.io/controller-runtime/pkg/client/config"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

var Scheme = runtime.NewScheme()
const (
	clusterCIDRFlagName = "cluster-cidr"
	clusterCIDREnvVar   = "CLUSTER_CIDR"
	KubeconfigFlagName  = "kubeconfig"
)

var (
	Scheme = runtime.NewScheme()
	clusterCIDR string
	kubeconfig  string
)

func init() {
	_ = clientgoscheme.AddToScheme(Scheme)
	_ = v1alpha1.AddToScheme(Scheme)
}

func main() {
	ctrlconfig.RegisterFlags(nil)
	flag.Parse()

	fs := addFlags()
	fs.Parse(os.Args[1:])
	ctx := context.Background()

	kubeconfig := flag.Lookup("kubeconfig").Value.String()
	if clusterCIDR == "" {
		clusterCIDR = os.Getenv(clusterCIDREnvVar)
	}
Review comment: The CLI uses urfave/cli so that values can be read either from the environment variable or from the CLI flag. This part of the code doesn't do that, so it has to parse both places manually. I think we should do what the CLI does here, to unify the approaches and avoid duplicating the code.
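For illustration, here is a minimal sketch of the approach that comment suggests, assuming urfave/cli v2; the flag name, env var name, and destination variable mirror the constants above, and the exact wiring in k3k's own CLI may differ:

package main

import (
	"os"

	"github.com/urfave/cli/v2"
	"k8s.io/klog/v2"
)

var clusterCIDR string

func main() {
	app := &cli.App{
		Name: "k3k",
		Flags: []cli.Flag{
			&cli.StringFlag{
				Name:        "cluster-cidr",
				Usage:       "The host's cluster (pod) CIDR",
				EnvVars:     []string{"CLUSTER_CIDR"}, // flag value wins, env var is the fallback
				Destination: &clusterCIDR,
			},
		},
		Action: func(c *cli.Context) error {
			// By the time the action runs, clusterCIDR is populated from either
			// source, so a manual os.Getenv fallback is no longer needed.
			klog.Infof("cluster CIDR: %s", clusterCIDR)
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		klog.Fatal(err)
	}
}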
	restConfig, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		klog.Fatalf("Failed to create config from kubeconfig file: %v", err)
@@ -38,12 +51,26 @@ func main() {
	mgr, err := ctrl.NewManager(restConfig, manager.Options{
		Scheme: Scheme,
	})

	if err != nil {
		klog.Fatalf("Failed to create new controller runtime manager: %v", err)
	}

	if err := cluster.Add(ctx, mgr); err != nil {
		klog.Fatalf("Failed to add the new controller: %v", err)
		klog.Fatalf("Failed to add the new cluster controller: %v", err)
	}

	if err := cluster.AddPodController(ctx, mgr); err != nil {
		klog.Fatalf("Failed to add the new cluster controller: %v", err)
	}
	klog.Info("adding clusterset controller")
	if err := clusterset.Add(ctx, mgr, clusterCIDR); err != nil {
		klog.Fatalf("Failed to add the clusterset controller: %v", err)
	}

	klog.Info("adding networkpolicy node controller")
	if err := clusterset.AddNodeController(ctx, mgr, clusterCIDR); err != nil {
		klog.Fatalf("Failed to add the clusterset controller: %v", err)
Suggested change
	}

	if err := cluster.AddPodController(ctx, mgr); err != nil {
@@ -54,3 +81,10 @@ func main() {
		klog.Fatalf("Failed to start the manager: %v", err)
	}
}

func addFlags() *flag.FlagSet {
	fs := flag.NewFlagSet("k3k", flag.ExitOnError)
	fs.StringVar(&clusterCIDR, clusterCIDRFlagName, "", "The host's cluster CIDR")
	fs.StringVar(&kubeconfig, KubeconfigFlagName, "", "Paths to a kubeconfig. Only required if out-of-cluster.")
	return fs
}
Review comment: It might be good to comment on or define the behavior for cases where people don't supply the clusterCIDR but do try to enable network policies (i.e. do we then try to extract the information from the available nodes?). It would also be good to provide more information to help people connect this value to the specific settings of the Kubernetes distribution they are using: this is a pod CIDR, so stating that explicitly would make it easier for people to look it up on their cluster.
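As a hedged illustration of the fallback this comment asks about, one could derive pod CIDRs from the node objects when neither --cluster-cidr nor CLUSTER_CIDR is set. The sketch below only shows the lookup with a plain client-go clientset; the helper name and how its result would feed into the generated NetworkPolicy rules are assumptions, not part of this PR:

package main

import (
	"context"
	"errors"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// collectNodePodCIDRs gathers the pod CIDRs recorded on the cluster's nodes.
// Each entry is a per-node range (not the whole cluster CIDR), and some
// distributions leave node.Spec.PodCIDRs empty, in which case the flag or
// env var still has to be set explicitly.
func collectNodePodCIDRs(ctx context.Context, client kubernetes.Interface) ([]string, error) {
	nodes, err := client.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
	if err != nil {
		return nil, err
	}
	var cidrs []string
	for _, node := range nodes.Items {
		if len(node.Spec.PodCIDRs) > 0 {
			cidrs = append(cidrs, node.Spec.PodCIDRs...)
		} else if node.Spec.PodCIDR != "" {
			cidrs = append(cidrs, node.Spec.PodCIDR)
		}
	}
	if len(cidrs) == 0 {
		return nil, errors.New("no pod CIDR found on any node; set --cluster-cidr or CLUSTER_CIDR")
	}
	return cidrs, nil
}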