# etcd-persistence-cluster.yaml
apiVersion: v1
kind: Service
metadata:
  name: "etcd-cluster-client"
spec:
  ports:
  - port: 2379
    name: client
  clusterIP: None
  selector:
    component: "etcd"
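# Clients inside the cluster can reach etcd through this headless service,
# e.g. (a hedged example, using the v3 API):
#   ETCDCTL_API=3 etcdctl --endpoints=http://etcd-cluster-client:2379 endpoint health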
---
apiVersion: v1
kind: Service
metadata:
  name: "etcd"
spec:
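  # publishNotReadyAddresses makes DNS publish the pods' records before they
  # pass readiness checks, so members can resolve each other during the
  # initial cluster bootstrap.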
  publishNotReadyAddresses: true
  ports:
  - port: 2379
    name: client
  - port: 2380
    name: peer
  clusterIP: None
  selector:
    component: "etcd"
---
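# Note: apps/v1beta1 StatefulSets were removed in Kubernetes 1.16; on newer
# clusters switch to apps/v1 and add a spec.selector matching the pod labels.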
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
  name: "etcd"
  labels:
    component: "etcd"
spec:
  serviceName: "etcd"
  # Changing the replicas value requires a manual etcdctl member remove/add
  # (remove before decreasing, add after increasing), as sketched below.
  replicas: 3
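  # For example (a sketch with ETCDCTL_API=3; the member ID, member name and
  # peer URL are placeholders to adapt):
  #   etcdctl --endpoints=http://etcd-cluster-client:2379 member remove <member-id>
  #   etcdctl --endpoints=http://etcd-cluster-client:2379 member add etcd-3 --peer-urls=http://etcd-3.etcd:2380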
  template:
    metadata:
      name: "etcd"
      labels:
        component: "etcd"
    spec:
      securityContext:
        runAsUser: 1000
        fsGroup: 1000
      containers:
      - name: "etcd"
        image: "quay.io/coreos/etcd:v3.2.3"
        ports:
        - containerPort: 2379
          name: client
        - containerPort: 2380
          name: peer
        env:
        - name: CLUSTER_SIZE
          value: "3"
        - name: SET_NAME
          value: "etcd"
        - name: ETCDCTL_API
          value: "3"
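        # CLUSTER_SIZE and SET_NAME are consumed by the startup script below;
        # ETCDCTL_API=3 makes etcdctl inside the container default to the v3 API.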
        resources:
          limits:
            cpu: 500m
            memory: 256Mi
          requests:
            cpu: 100m
            memory: 128Mi
        volumeMounts:
        - name: data
          mountPath: /var/run/etcd
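        # etcd's data-dir (set below) lives under this mount, so member state
        # survives pod restarts and rescheduling.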
        command:
        - "/bin/sh"
        - "-ecx"
        - |
          IP=$(hostname -i)
          # There is no need to wait for the other members' DNS names here:
          # waiting would break restarts of an already initialized cluster
          # whenever a pod cannot be scheduled, because its endpoint (and thus
          # its DNS entry) would never be created. During initialization etcd
          # simply fails to resolve the name and retries.
          #
          #for i in $(seq 0 $((${CLUSTER_SIZE} - 1))); do
          #  while true; do
          #    echo "Waiting for ${SET_NAME}-${i}.${SET_NAME} to come up"
          #    ping -W 1 -c 1 ${SET_NAME}-${i}.${SET_NAME} > /dev/null && break
          #    sleep 1s
          #  done
          #done
          PEERS=""
          for i in $(seq 0 $((${CLUSTER_SIZE} - 1))); do
            PEERS="${PEERS}${PEERS:+,}${SET_NAME}-${i}=http://${SET_NAME}-${i}.${SET_NAME}:2380"
          done
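          # With the defaults above, PEERS ends up as:
          #   etcd-0=http://etcd-0.etcd:2380,etcd-1=http://etcd-1.etcd:2380,etcd-2=http://etcd-2.etcd:2380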
          # start etcd. If cluster is already initialized the `--initial-*` options will be ignored.
          exec etcd --name ${HOSTNAME} \
            --listen-peer-urls http://${IP}:2380 \
            --listen-client-urls http://${IP}:2379,http://127.0.0.1:2379 \
            --advertise-client-urls http://${HOSTNAME}.${SET_NAME}:2379 \
            --initial-advertise-peer-urls http://${HOSTNAME}.${SET_NAME}:2380 \
            --initial-cluster-token etcd-cluster-1 \
            --initial-cluster ${PEERS} \
            --initial-cluster-state new \
            --data-dir /var/run/etcd/default.etcd
  ## We use dynamic PV provisioning through the "standard" storage class, so
  ## this resource can be deployed to minikube without changes (minikube
  ## defines this class for its hostpath provisioner). In production, define
  ## your own storage class or PV claims; see the example below.
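  # For example, a claim template could pin a class explicitly (hypothetical
  # class name) by adding under its spec:
  #   storageClassName: "fast-ssd"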
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes:
      - "ReadWriteOnce"
      resources:
        requests:
          storage: 1Gi
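# A possible smoke test after applying this manifest (assumes kubectl points
# at the target cluster and namespace):
#   kubectl apply -f etcd-persistence-cluster.yaml
#   kubectl exec etcd-0 -- etcdctl endpoint health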