diff --git a/cephfs/Dockerfile b/cephfs/Dockerfile
new file mode 100644
index 00000000..be06422a
--- /dev/null
+++ b/cephfs/Dockerfile
@@ -0,0 +1,21 @@
+# Copyright 2017 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM centos:7
+RUN rpm -Uvh https://download.ceph.com/rpm-jewel/el7/noarch/ceph-release-1-1.el7.noarch.rpm
+RUN yum install -y epel-release
+RUN yum install -y ceph-common python-cephfs
+ADD cephfs-provisioner /usr/local/bin/cephfs-provisioner
+ADD cephfs_provisioner/cephfs_provisioner.py /usr/local/bin/cephfs_provisioner
+RUN chmod o+x /usr/local/bin/cephfs_provisioner
diff --git a/cephfs/README.md b/cephfs/README.md
new file mode 100644
index 00000000..e36b69cb
--- /dev/null
+++ b/cephfs/README.md
@@ -0,0 +1,54 @@
+# CephFS Volume Provisioner for Kubernetes 1.5+
+
+Uses the Ceph volume client library (`ceph_volume_client`) to create and delete CephFS volumes.
+
+# Test instructions
+
+* Build cephfs-provisioner and the container image
+
+```bash
+go build cephfs-provisioner.go
+docker build -t cephfs-provisioner .
+```
+
+* Start a local Kubernetes cluster
+
+* Create a Ceph admin secret
+
+```bash
+ceph auth get client.admin 2>&1 | grep "key = " | awk '{print $3}' | xargs echo -n > /tmp/secret
+kubectl create secret generic ceph-secret-admin --from-file=/tmp/secret --namespace=kube-system
+```
+
+* Start the CephFS provisioner
+
+Assuming the kubeconfig directory is `/root/.kube`:
+
+```bash
+docker run -ti -v /root/.kube:/kube --privileged --net=host cephfs-provisioner /usr/local/bin/cephfs-provisioner -master=http://127.0.0.1:8080 -kubeconfig=/kube/config
+```
+
+* Create a CephFS StorageClass
+
+```bash
+kubectl create -f class.yaml
+```
+
+* Create a claim
+
+```bash
+kubectl create -f claim.yaml
+```
+
+* Create a Pod using the claim
+
+```bash
+kubectl create -f test-pod.yaml
+```
+
+
+# Known limitations
+
+* Kernel CephFS doesn't work with SELinux; setting an SELinux label in a Pod's securityContext has no effect.
+* Kernel CephFS doesn't support quotas, so the capacity requested by a PVC is neither enforced nor validated.
+* Currently, each Ceph user created by the provisioner is granted the `allow r` MDS cap so that CephFS can be mounted.
\ No newline at end of file
diff --git a/cephfs/ceph-secret-admin.yaml b/cephfs/ceph-secret-admin.yaml
new file mode 100644
index 00000000..c3a741ad
--- /dev/null
+++ b/cephfs/ceph-secret-admin.yaml
@@ -0,0 +1,8 @@
+apiVersion: v1
+kind: Secret
+metadata:
+  name: ceph-secret-admin
+type: "kubernetes.io/cephfs"
+data:
+# Please note this value is base64 encoded.
+  key: QVFDTXBIOVlNNFExQmhBQVhHTlF5eU9uZThac1hxV0dvbi9kSVE9PQ==
diff --git a/cephfs/cephfs-provisioner.go b/cephfs/cephfs-provisioner.go
new file mode 100644
index 00000000..9ca661a8
--- /dev/null
+++ b/cephfs/cephfs-provisioner.go
@@ -0,0 +1,322 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+	"encoding/json"
+	"errors"
+	"flag"
+	"fmt"
+	"os/exec"
+	"strings"
+	"time"
+
+	"github.com/golang/glog"
+	"github.com/kubernetes-incubator/external-storage/lib/controller"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/pkg/api/v1"
+	storage "k8s.io/client-go/pkg/apis/storage/v1beta1"
+	"k8s.io/client-go/pkg/types"
+	"k8s.io/client-go/pkg/util/uuid"
+	"k8s.io/client-go/pkg/util/wait"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/clientcmd"
+)
+
+const (
+	resyncPeriod              = 15 * time.Second
+	provisionerName           = "kubernetes.io/cephfs"
+	exponentialBackOffOnError = false
+	failedRetryThreshold      = 5
+	provisionCmd              = "/usr/local/bin/cephfs_provisioner"
+	provisionerIDAnn          = "cephFSProvisionerIdentity"
+	cephShareAnn              = "cephShare"
+)
+
+type provisionOutput struct {
+	Path   string `json:"path"`
+	User   string `json:"user"`
+	Secret string `json:"auth"`
+}
+
+type cephFSProvisioner struct {
+	// Kubernetes client. Used to retrieve the Ceph admin secret.
+	client kubernetes.Interface
+	// Identity of this cephFSProvisioner, generated. Used to identify "this"
+	// provisioner's PVs.
+	identity types.UID
+}
+
+func newCephFSProvisioner(client kubernetes.Interface) controller.Provisioner {
+	return &cephFSProvisioner{
+		client:   client,
+		identity: uuid.NewUUID(),
+	}
+}
+
+var _ controller.Provisioner = &cephFSProvisioner{}
+
+// Provision creates a storage asset and returns a PV object representing it.
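+// The share is provisioned by shelling out to the cephfs_provisioner helper
+// with a generated share name and user id; the Ceph key it returns is stored
+// in a Secret in the claim's namespace and referenced from the resulting PV.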
+func (p *cephFSProvisioner) Provision(options controller.VolumeOptions) (*v1.PersistentVolume, error) {
+	if options.PVC.Spec.Selector != nil {
+		return nil, fmt.Errorf("claim Selector is not supported")
+	}
+	cluster, adminID, adminSecret, mon, err := p.parseParameters(options.Parameters)
+	if err != nil {
+		return nil, err
+	}
+	// create random share name
+	share := fmt.Sprintf("kubernetes-dynamic-pvc-%s", uuid.NewUUID())
+	// create random user id
+	user := fmt.Sprintf("kubernetes-dynamic-user-%s", uuid.NewUUID())
+	// provision share: build the helper command
+	cmd := exec.Command(provisionCmd, "-n", share, "-u", user)
+	// set env
+	cmd.Env = []string{
+		"CEPH_CLUSTER_NAME=" + cluster,
+		"CEPH_MON=" + strings.Join(mon[:], ","),
+		"CEPH_AUTH_ID=" + adminID,
+		"CEPH_AUTH_KEY=" + adminSecret}
+
+	output, cmdErr := cmd.CombinedOutput()
+	if cmdErr != nil {
+		glog.Errorf("failed to provision share %q for %q, err: %v, output: %v", share, user, cmdErr, string(output))
+		return nil, cmdErr
+	}
+	// validate output
+	res := &provisionOutput{}
+	if err := json.Unmarshal(output, res); err != nil {
+		return nil, fmt.Errorf("failed to parse provisioner output %q: %v", string(output), err)
+	}
+	if res.User == "" || res.Secret == "" || res.Path == "" {
+		return nil, fmt.Errorf("invalid provisioner output")
+	}
+	// create secret in PVC's namespace
+	nameSpace := options.PVC.Namespace
+	secretName := "ceph-" + user + "-secret"
+	secret := &v1.Secret{
+		ObjectMeta: v1.ObjectMeta{
+			Namespace: nameSpace,
+			Name:      secretName,
+		},
+		Data: map[string][]byte{
+			"key": []byte(res.Secret),
+		},
+		Type: "Opaque",
+	}
+
+	_, err = p.client.Core().Secrets(nameSpace).Create(secret)
+	if err != nil {
+		glog.Errorf("Cephfs Provisioner: create volume failed, err: %v", err)
+		return nil, fmt.Errorf("failed to create secret: %v", err)
+	}
+
+	pv := &v1.PersistentVolume{
+		ObjectMeta: v1.ObjectMeta{
+			Name: options.PVName,
+			Annotations: map[string]string{
+				provisionerIDAnn: string(p.identity),
+				cephShareAnn:     share,
+			},
+		},
+		Spec: v1.PersistentVolumeSpec{
+			PersistentVolumeReclaimPolicy: options.PersistentVolumeReclaimPolicy,
+			AccessModes: []v1.PersistentVolumeAccessMode{
+				v1.ReadWriteOnce,
+				v1.ReadOnlyMany,
+				v1.ReadWriteMany,
+			},
+			Capacity: v1.ResourceList{ // FIXME: kernel cephfs doesn't enforce quota, so the capacity here is not meaningful.
+				v1.ResourceName(v1.ResourceStorage): options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)],
+			},
+			PersistentVolumeSource: v1.PersistentVolumeSource{
+				CephFS: &v1.CephFSVolumeSource{
+					Monitors: mon,
+					Path:     res.Path[strings.Index(res.Path, "/"):],
+					SecretRef: &v1.LocalObjectReference{
+						Name: secretName,
+					},
+					User: user,
+				},
+			},
+		},
+	}
+
+	glog.Infof("successfully created CephFS share %+v", pv.Spec.PersistentVolumeSource.CephFS)
+
+	return pv, nil
+}
+
+// Delete removes the storage asset that was created by Provision represented
+// by the given PV.
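+// It looks up the admin credentials again from the PV's storage class
+// parameters and invokes the cephfs_provisioner helper with -r to deauthorize
+// the generated user and purge the share recorded in the PV's annotations.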
+func (p *cephFSProvisioner) Delete(volume *v1.PersistentVolume) error {
+	ann, ok := volume.Annotations[provisionerIDAnn]
+	if !ok {
+		return errors.New("identity annotation not found on PV")
+	}
+	if ann != string(p.identity) {
+		return &controller.IgnoredError{Reason: "identity annotation on PV does not match ours"}
+	}
+	share, ok := volume.Annotations[cephShareAnn]
+	if !ok {
+		return errors.New("ceph share annotation not found on PV")
+	}
+	// delete CephFS
+	class, err := p.getClassForVolume(volume)
+	if err != nil {
+		return err
+	}
+	cluster, adminID, adminSecret, mon, err := p.parseParameters(class.Parameters)
+	if err != nil {
+		return err
+	}
+	user := volume.Spec.PersistentVolumeSource.CephFS.User
+	// build the helper command
+	cmd := exec.Command(provisionCmd, "-r", "-n", share, "-u", user)
+	// set env
+	cmd.Env = []string{
+		"CEPH_CLUSTER_NAME=" + cluster,
+		"CEPH_MON=" + strings.Join(mon[:], ","),
+		"CEPH_AUTH_ID=" + adminID,
+		"CEPH_AUTH_KEY=" + adminSecret}
+
+	output, cmdErr := cmd.CombinedOutput()
+	if cmdErr != nil {
+		glog.Errorf("failed to delete share %q for %q, err: %v, output: %v", share, user, cmdErr, string(output))
+		return cmdErr
+	}
+
+	return nil
+}
+
+func (p *cephFSProvisioner) parseParameters(parameters map[string]string) (string, string, string, []string, error) {
+	var (
+		err                                                                  error
+		mon                                                                  []string
+		cluster, adminID, adminSecretName, adminSecretNamespace, adminSecret string
+	)
+
+	adminSecretNamespace = "default"
+	adminID = "admin"
+	cluster = "ceph"
+
+	for k, v := range parameters {
+		switch strings.ToLower(k) {
+		case "cluster":
+			cluster = v
+		case "monitors":
+			mon = append(mon, strings.Split(v, ",")...)
+		case "adminid":
+			adminID = v
+		case "adminsecretname":
+			adminSecretName = v
+		case "adminsecretnamespace":
+			adminSecretNamespace = v
+		default:
+			return "", "", "", nil, fmt.Errorf("invalid option %q", k)
+		}
+	}
+	// sanity check
+	if adminSecretName == "" {
+		return "", "", "", nil, fmt.Errorf("missing Ceph admin secret name")
+	}
+	if adminSecret, err = p.parsePVSecret(adminSecretNamespace, adminSecretName); err != nil {
+		return "", "", "", nil, fmt.Errorf("failed to get admin secret from [%q/%q]: %v", adminSecretNamespace, adminSecretName, err)
+	}
+	if len(mon) < 1 {
+		return "", "", "", nil, fmt.Errorf("missing Ceph monitors")
+	}
+	return cluster, adminID, adminSecret, mon, nil
+}
+
+func (p *cephFSProvisioner) parsePVSecret(namespace, secretName string) (string, error) {
+	if p.client == nil {
+		return "", fmt.Errorf("cannot get kube client")
+	}
+	secrets, err := p.client.Core().Secrets(namespace).Get(secretName)
+	if err != nil {
+		return "", err
+	}
+	// The admin secret is expected to contain exactly one key; return its value.
+	for _, data := range secrets.Data {
+		return string(data), nil
+	}
+
+	return "", fmt.Errorf("no key found in secret %q/%q", namespace, secretName)
+}
+
+func (p *cephFSProvisioner) getClassForVolume(pv *v1.PersistentVolume) (*storage.StorageClass, error) {
+	className, found := pv.Annotations["volume.beta.kubernetes.io/storage-class"]
+	if !found {
+		return nil, fmt.Errorf("volume has no storage class annotation")
+	}
+
+	class, err := p.client.Storage().StorageClasses().Get(className)
+	if err != nil {
+		return nil, err
+	}
+	return class, nil
+}
+
+var (
+	master     = flag.String("master", "", "Master URL")
+	kubeconfig = flag.String("kubeconfig", "", "Absolute path to the kubeconfig")
+)
+
+func main() {
+	flag.Parse()
+	flag.Set("logtostderr", "true")
+
+	var config *rest.Config
+	var err error
+	if *master != "" || *kubeconfig != "" {
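+		// Out-of-cluster configuration: build the client config from the
+		// explicit master URL and/or kubeconfig path given on the command line.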
+		config, err = clientcmd.BuildConfigFromFlags(*master, *kubeconfig)
+	} else {
+		config, err = rest.InClusterConfig()
+	}
+
+	if err != nil {
+		glog.Fatalf("Failed to create config: %v", err)
+	}
+	clientset, err := kubernetes.NewForConfig(config)
+	if err != nil {
+		glog.Fatalf("Failed to create client: %v", err)
+	}
+
+	// The controller needs to know what the server version is because out-of-tree
+	// provisioners aren't officially supported until 1.5
+	serverVersion, err := clientset.Discovery().ServerVersion()
+	if err != nil {
+		glog.Fatalf("Error getting server version: %v", err)
+	}
+
+	// Create the provisioner: it implements the Provisioner interface expected by
+	// the controller
+	cephFSProvisioner := newCephFSProvisioner(clientset)
+
+	// Start the provision controller which will dynamically provision cephFS
+	// PVs
+	pc := controller.NewProvisionController(clientset, resyncPeriod, provisionerName, cephFSProvisioner, serverVersion.GitVersion, exponentialBackOffOnError, failedRetryThreshold, 2*resyncPeriod, resyncPeriod, resyncPeriod/2, 2*resyncPeriod)
+
+	pc.Run(wait.NeverStop)
+}
diff --git a/cephfs/cephfs_provisioner/__init__.py b/cephfs/cephfs_provisioner/__init__.py
new file mode 100644
index 00000000..da9ca2ec
--- /dev/null
+++ b/cephfs/cephfs_provisioner/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2017 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/cephfs/cephfs_provisioner/cephfs_provisioner.py b/cephfs/cephfs_provisioner/cephfs_provisioner.py
new file mode 100755
index 00000000..fcd058bf
--- /dev/null
+++ b/cephfs/cephfs_provisioner/cephfs_provisioner.py
@@ -0,0 +1,268 @@
+#!/usr/bin/env python
+
+# Copyright 2017 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import rados
+import getopt
+import sys
+import json
+
+"""
+CEPH_CLUSTER_NAME=test CEPH_MON=172.24.0.4 CEPH_AUTH_ID=admin CEPH_AUTH_KEY=AQCMpH9YM4Q1BhAAXGNQyyOne8ZsXqWGon/dIQ== cephfs_provisioner.py -n foo -u bar
+"""
+try:
+    import ceph_volume_client
+    ceph_module_found = True
+except ImportError as e:
+    ceph_volume_client = None
+    ceph_module_found = False
+
+VOLUME_GROUP = "kubernetes"
+CONF_PATH = "/etc/ceph/"
+
+class CephFSNativeDriver(object):
+    """Driver for the Ceph Filesystem.
+
+    This driver is 'native' in the sense that it exposes a CephFS filesystem
+    for use directly by guests, with no intermediate layer like NFS.
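+
+    Connection settings are taken from environment variables: CEPH_CLUSTER_NAME
+    (defaults to "ceph"), CEPH_MON, CEPH_AUTH_ID and CEPH_AUTH_KEY. A minimal
+    ceph.conf and client keyring are written under /etc/ceph/ before connecting.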
+ """ + + def __init__(self, *args, **kwargs): + self._volume_client = None + + + def _create_conf(self, cluster_name, mons): + """ Create conf using monitors + Create a minimal ceph conf with monitors and cephx + """ + conf_path = CONF_PATH + cluster_name + ".conf" + conf = open(conf_path, 'w') + conf.write("[global]\n") + conf.write("mon_host = " + mons + "\n") + conf.write("auth_cluster_required = cephx\nauth_service_required = cephx\nauth_client_required = cephx\n") + conf.close() + return conf_path + + def _create_keyring(self, cluster_name, id, key): + """ Create client keyring using id and key + """ + keyring = open(CONF_PATH + cluster_name + "." + "client." + id + ".keyring", 'w') + keyring.write("[client." + id + "]\n") + keyring.write("key = " + key + "\n") + keyring.write("caps mds = \"allow *\"\n") + keyring.write("caps mon = \"allow *\"\n") + keyring.write("caps osd = \"allow *\"\n") + keyring.close() + + @property + def volume_client(self): + if self._volume_client: + return self._volume_client + + if not ceph_module_found: + raise ValueError("Ceph client libraries not found.") + + try: + cluster_name = os.environ["CEPH_CLUSTER_NAME"] + except KeyError: + cluster_name = "ceph" + try: + mons = os.environ["CEPH_MON"] + except KeyError: + raise ValueError("Missing CEPH_MON env") + try: + auth_id = os.environ["CEPH_AUTH_ID"] + except KeyError: + raise ValueError("Missing CEPH_AUTH_ID") + try: + auth_key = os.environ["CEPH_AUTH_KEY"] + except: + raise ValueError("Missing CEPH_AUTH_KEY") + + conf_path = self._create_conf(cluster_name, mons) + self._create_keyring(cluster_name, auth_id, auth_key) + + self._volume_client = ceph_volume_client.CephFSVolumeClient( + auth_id, conf_path, cluster_name) + try: + self._volume_client.connect(None) + except Exception: + self._volume_client = None + raise + + return self._volume_client + + def _authorize_ceph(self, volume_path, auth_id, readonly): + path = self._volume_client._get_path(volume_path) + + # First I need to work out what the data pool is for this share: + # read the layout + pool_name = self._volume_client._get_ancestor_xattr(path, "ceph.dir.layout.pool") + namespace = self._volume_client.fs.getxattr(path, "ceph.dir.layout.pool_namespace") + + # Now construct auth capabilities that give the guest just enough + # permissions to access the share + client_entity = "client.{0}".format(auth_id) + want_access_level = 'r' if readonly else 'rw' + want_mds_cap = 'allow r,allow {0} path={1}'.format(want_access_level, path) + want_osd_cap = 'allow {0} pool={1} namespace={2}'.format( + want_access_level, pool_name, namespace) + + try: + existing = self._volume_client._rados_command( + 'auth get', + { + 'entity': client_entity + } + ) + # FIXME: rados raising Error instead of ObjectNotFound in auth get failure + except rados.Error: + caps = self._volume_client._rados_command( + 'auth get-or-create', + { + 'entity': client_entity, + 'caps': [ + 'mds', want_mds_cap, + 'osd', want_osd_cap, + 'mon', 'allow r'] + }) + else: + # entity exists, update it + cap = existing[0] + + # Construct auth caps that if present might conflict with the desired + # auth caps. 
+            unwanted_access_level = 'r' if want_access_level == 'rw' else 'rw'
+            unwanted_mds_cap = 'allow {0} path={1}'.format(unwanted_access_level, path)
+            unwanted_osd_cap = 'allow {0} pool={1} namespace={2}'.format(
+                unwanted_access_level, pool_name, namespace)
+
+            def cap_update(orig, want, unwanted):
+                # Updates the existing auth caps such that there is a single
+                # occurrence of wanted auth caps and no occurrence of
+                # conflicting auth caps.
+
+                cap_tokens = set(orig.split(","))
+
+                cap_tokens.discard(unwanted)
+                cap_tokens.add(want)
+
+                return ",".join(cap_tokens)
+
+            osd_cap_str = cap_update(cap['caps'].get('osd', ""), want_osd_cap, unwanted_osd_cap)
+            mds_cap_str = cap_update(cap['caps'].get('mds', ""), want_mds_cap, unwanted_mds_cap)
+
+            caps = self._volume_client._rados_command(
+                'auth caps',
+                {
+                    'entity': client_entity,
+                    'caps': [
+                        'mds', mds_cap_str,
+                        'osd', osd_cap_str,
+                        'mon', cap['caps'].get('mon')]
+                })
+            caps = self._volume_client._rados_command(
+                'auth get',
+                {
+                    'entity': client_entity
+                }
+            )
+
+        # Result expected like this:
+        # [
+        #     {
+        #         "entity": "client.foobar",
+        #         "key": "AQBY0\/pViX\/wBBAAUpPs9swy7rey1qPhzmDVGQ==",
+        #         "caps": {
+        #             "mds": "allow *",
+        #             "mon": "allow *"
+        #         }
+        #     }
+        # ]
+        assert len(caps) == 1
+        assert caps[0]['entity'] == client_entity
+        return caps[0]
+
+
+    def create_share(self, path, user_id, size=None):
+        """Create a CephFS volume.
+        """
+        volume_path = ceph_volume_client.VolumePath(VOLUME_GROUP, path)
+
+        # Create the CephFS volume
+        volume = self.volume_client.create_volume(volume_path, size=size)
+
+        # To mount this you need to know the mon IPs and the path to the volume
+        mon_addrs = self.volume_client.get_mon_addrs()
+
+        export_location = "{addrs}:{path}".format(
+            addrs=",".join(mon_addrs),
+            path=volume['mount_path'])
+
+        """TODO
+        restrict to user_id
+        """
+        auth_result = self._authorize_ceph(volume_path, user_id, False)
+        ret = {
+            'path': export_location,
+            'user': auth_result['entity'],
+            'auth': auth_result['key']
+        }
+        return json.dumps(ret)
+
+
+    def delete_share(self, path, user_id):
+        volume_path = ceph_volume_client.VolumePath(VOLUME_GROUP, path)
+        self.volume_client._deauthorize(volume_path, user_id)
+        self.volume_client.delete_volume(volume_path)
+        self.volume_client.purge_volume(volume_path)
+
+    def __del__(self):
+        if self._volume_client:
+            self._volume_client.disconnect()
+            self._volume_client = None
+
+def main():
+    create = True
+    share = ""
+    user = ""
+    cephfs = CephFSNativeDriver()
+    try:
+        opts, args = getopt.getopt(sys.argv[1:], "rn:u:", ["remove"])
+    except getopt.GetoptError:
+        print "Usage: " + sys.argv[0] + " --remove -n share_name -u ceph_user_id"
+        sys.exit(1)
+
+    for opt, arg in opts:
+        if opt == '-n':
+            share = arg
+        elif opt == '-u':
+            user = arg
+        elif opt in ("-r", "--remove"):
+            create = False
+
+    if share == "" or user == "":
+        print "Usage: " + sys.argv[0] + " --remove -n share_name -u ceph_user_id"
+        sys.exit(1)
+
+    if create:
+        print cephfs.create_share(share, user)
+    else:
+        cephfs.delete_share(share, user)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/cephfs/claim.yaml b/cephfs/claim.yaml
new file mode 100644
index 00000000..2dca6aec
--- /dev/null
+++ b/cephfs/claim.yaml
@@ -0,0 +1,12 @@
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+  name: claim1
+  annotations:
+    volume.beta.kubernetes.io/storage-class: "cephfs"
+spec:
+  accessModes:
+    - ReadWriteMany
+  resources:
+    requests:
+      storage: 1Gi
diff --git a/cephfs/class.yaml b/cephfs/class.yaml
new file mode 100644
index 00000000..f7107537
--- /dev/null
+++ b/cephfs/class.yaml
@@ -0,0 +1,11 @@
+kind: StorageClass
+apiVersion: storage.k8s.io/v1beta1
+metadata:
+  name: cephfs
+provisioner: kubernetes.io/cephfs
+parameters:
+  monitors: 172.24.0.4:6789
+  adminId: admin
+  adminSecretName: ceph-secret-admin
+  adminSecretNamespace: "kube-system"
+
diff --git a/cephfs/local-start.sh b/cephfs/local-start.sh
new file mode 100644
index 00000000..74f9bb51
--- /dev/null
+++ b/cephfs/local-start.sh
@@ -0,0 +1,17 @@
+# Copyright 2017 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+PATH=${PATH}:`pwd`
+cephfs-provisioner -master=http://127.0.0.1:8080 -kubeconfig=/root/.kube/config -logtostderr
+
diff --git a/cephfs/test-pod.yaml b/cephfs/test-pod.yaml
new file mode 100644
index 00000000..4888676b
--- /dev/null
+++ b/cephfs/test-pod.yaml
@@ -0,0 +1,21 @@
+kind: Pod
+apiVersion: v1
+metadata:
+  name: test-pod
+spec:
+  containers:
+  - name: test-pod
+    image: gcr.io/google_containers/busybox:1.24
+    command:
+      - "/bin/sh"
+    args:
+      - "-c"
+      - "touch /mnt/SUCCESS && exit 0 || exit 1"
+    volumeMounts:
+      - name: pvc
+        mountPath: "/mnt"
+  restartPolicy: "Never"
+  volumes:
+    - name: pvc
+      persistentVolumeClaim:
+        claimName: claim1